Diffstat (limited to 'nova/tests')
-rw-r--r-- nova/tests/fixtures/__init__.py | 2
-rw-r--r-- nova/tests/fixtures/cinder.py | 30
-rw-r--r-- nova/tests/fixtures/filesystem.py | 81
-rw-r--r-- nova/tests/fixtures/glance.py | 31
-rw-r--r-- nova/tests/fixtures/libvirt.py | 151
-rw-r--r-- nova/tests/fixtures/libvirt_data.py | 204
-rw-r--r-- nova/tests/fixtures/libvirt_imagebackend.py | 18
-rw-r--r-- nova/tests/fixtures/neutron.py | 7
-rw-r--r-- nova/tests/fixtures/notifications.py | 6
-rw-r--r-- nova/tests/fixtures/nova.py | 375
-rw-r--r-- nova/tests/fixtures/os_brick.py | 3
-rw-r--r-- nova/tests/fixtures/policy.py | 4
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl | 118
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl | 76
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl | 5
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl | 5
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl | 8
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl | 9
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl | 7
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl | 5
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl | 4
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-shelve.json.tpl (renamed from nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-null.json.tpl) | 0
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl | 6
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl | 5
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl | 6
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl | 5
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl | 5
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl | 3
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl | 80
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl | 15
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl | 21
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl | 22
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl | 81
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl | 8
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl | 78
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl | 88
-rw-r--r-- nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl | 24
-rw-r--r-- nova/tests/functional/api_sample_tests/test_baremetal_nodes.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_compare_result.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_create_backup.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_evacuate.py | 61
-rw-r--r-- nova/tests/functional/api_sample_tests/test_hypervisors.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_images.py | 30
-rw-r--r-- nova/tests/functional/api_sample_tests/test_keypairs.py | 63
-rw-r--r-- nova/tests/functional/api_sample_tests/test_migrate_server.py | 3
-rw-r--r-- nova/tests/functional/api_sample_tests/test_networks.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_remote_consoles.py | 20
-rw-r--r-- nova/tests/functional/api_sample_tests/test_server_migrations.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_server_password.py | 2
-rw-r--r-- nova/tests/functional/api_sample_tests/test_servers.py | 45
-rw-r--r-- nova/tests/functional/api_sample_tests/test_shelve.py | 297
-rw-r--r-- nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py | 2
-rw-r--r-- nova/tests/functional/compute/test_init_host.py | 2
-rw-r--r-- nova/tests/functional/compute/test_live_migration.py | 3
-rw-r--r-- nova/tests/functional/compute/test_migration_list.py | 6
-rw-r--r-- nova/tests/functional/compute/test_resource_tracker.py | 13
-rw-r--r-- nova/tests/functional/db/test_aggregate.py | 2
-rw-r--r-- nova/tests/functional/db/test_compute_api.py | 3
-rw-r--r-- nova/tests/functional/db/test_compute_node.py | 2
-rw-r--r-- nova/tests/functional/db/test_host_mapping.py | 3
-rw-r--r-- nova/tests/functional/db/test_instance_group.py | 3
-rw-r--r-- nova/tests/functional/db/test_instance_mapping.py | 3
-rw-r--r-- nova/tests/functional/db/test_quota.py | 3
-rw-r--r-- nova/tests/functional/db/test_virtual_interface.py | 3
-rw-r--r-- nova/tests/functional/integrated_helpers.py | 60
-rw-r--r-- nova/tests/functional/libvirt/base.py | 189
-rw-r--r-- nova/tests/functional/libvirt/test_device_bus_migration.py | 407
-rw-r--r-- nova/tests/functional/libvirt/test_evacuate.py | 9
-rw-r--r-- nova/tests/functional/libvirt/test_live_migration.py | 119
-rw-r--r-- nova/tests/functional/libvirt/test_machine_type.py | 30
-rw-r--r-- nova/tests/functional/libvirt/test_numa_live_migration.py | 12
-rw-r--r-- nova/tests/functional/libvirt/test_numa_servers.py | 82
-rw-r--r-- nova/tests/functional/libvirt/test_pci_in_placement.py | 1997
-rw-r--r-- nova/tests/functional/libvirt/test_pci_sriov_servers.py | 2784
-rw-r--r-- nova/tests/functional/libvirt/test_power_manage.py | 270
-rw-r--r-- nova/tests/functional/libvirt/test_report_cpu_traits.py | 8
-rw-r--r-- nova/tests/functional/libvirt/test_reshape.py | 23
-rw-r--r-- nova/tests/functional/libvirt/test_uefi.py | 3
-rw-r--r-- nova/tests/functional/libvirt/test_vgpu.py | 35
-rw-r--r-- nova/tests/functional/libvirt/test_vpmem.py | 7
-rw-r--r-- nova/tests/functional/libvirt/test_vtpm.py | 4
-rw-r--r-- nova/tests/functional/notification_sample_tests/notification_sample_base.py | 2
-rw-r--r-- nova/tests/functional/notification_sample_tests/test_compute_task.py | 7
-rw-r--r-- nova/tests/functional/notification_sample_tests/test_instance.py | 32
-rw-r--r-- nova/tests/functional/notification_sample_tests/test_keypair.py | 5
-rw-r--r-- nova/tests/functional/notification_sample_tests/test_libvirt.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1554631.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1595962.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1628606.py | 60
-rw-r--r-- nova/tests/functional/regressions/test_bug_1669054.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1713783.py | 6
-rw-r--r-- nova/tests/functional/regressions/test_bug_1732947.py | 4
-rw-r--r-- nova/tests/functional/regressions/test_bug_1764883.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1781286.py | 33
-rw-r--r-- nova/tests/functional/regressions/test_bug_1823370.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1830747.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1831771.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1843090.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1843708.py | 6
-rw-r--r-- nova/tests/functional/regressions/test_bug_1845291.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1849165.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1853009.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1862633.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1879878.py | 3
-rw-r--r-- nova/tests/functional/regressions/test_bug_1888395.py | 42
-rw-r--r-- nova/tests/functional/regressions/test_bug_1889108.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1890244.py | 96
-rw-r--r-- nova/tests/functional/regressions/test_bug_1893284.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1896463.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1899835.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1902925.py | 5
-rw-r--r-- nova/tests/functional/regressions/test_bug_1914777.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1922053.py | 6
-rw-r--r-- nova/tests/functional/regressions/test_bug_1928063.py | 6
-rw-r--r-- nova/tests/functional/regressions/test_bug_1937084.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1937375.py | 2
-rw-r--r-- nova/tests/functional/regressions/test_bug_1944619.py | 76
-rw-r--r-- nova/tests/functional/regressions/test_bug_1951656.py | 73
-rw-r--r-- nova/tests/functional/regressions/test_bug_1978983.py | 71
-rw-r--r-- nova/tests/functional/regressions/test_bug_1980720.py | 68
-rw-r--r-- nova/tests/functional/regressions/test_bug_1983753.py | 177
-rw-r--r-- nova/tests/functional/regressions/test_bug_1995153.py | 107
-rw-r--r-- nova/tests/functional/test_aggregates.py | 24
-rw-r--r-- nova/tests/functional/test_availability_zones.py | 448
-rw-r--r-- nova/tests/functional/test_boot_from_volume.py | 42
-rw-r--r-- nova/tests/functional/test_cold_migrate.py | 2
-rw-r--r-- nova/tests/functional/test_compute_mgr.py | 3
-rw-r--r-- nova/tests/functional/test_cross_cell_migrate.py | 2
-rw-r--r-- nova/tests/functional/test_ephemeral_encryption.py | 381
-rw-r--r-- nova/tests/functional/test_images.py | 8
-rw-r--r-- nova/tests/functional/test_instance_actions.py | 12
-rw-r--r-- nova/tests/functional/test_ip_allocation.py | 53
-rw-r--r-- nova/tests/functional/test_monkey_patch.py | 45
-rw-r--r-- nova/tests/functional/test_nova_manage.py | 2
-rw-r--r-- nova/tests/functional/test_policy.py | 2
-rw-r--r-- nova/tests/functional/test_report_client.py | 70
-rw-r--r-- nova/tests/functional/test_routed_networks.py | 2
-rw-r--r-- nova/tests/functional/test_server_faults.py | 2
-rw-r--r-- nova/tests/functional/test_server_group.py | 80
-rw-r--r-- nova/tests/functional/test_server_rescue.py | 86
-rw-r--r-- nova/tests/functional/test_servers.py | 209
-rw-r--r-- nova/tests/functional/test_servers_provider_tree.py | 6
-rw-r--r-- nova/tests/functional/test_servers_resource_request.py | 33
-rw-r--r-- nova/tests/functional/test_service.py | 85
-rw-r--r-- nova/tests/functional/test_unified_limits.py | 217
-rw-r--r-- nova/tests/unit/accelerator/test_cyborg.py | 8
-rw-r--r-- nova/tests/unit/api/openstack/compute/admin_only_action_common.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_admin_password.py | 4
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_aggregates.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_api.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_attach_interfaces.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_availability_zone.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py | 5
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_console_output.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_create_backup.py | 7
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_deferred_delete.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_disk_config.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_evacuate.py | 32
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_flavor_access.py | 27
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_flavor_manage.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_flavors.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_floating_ips.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_hosts.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_hypervisors.py | 396
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_image_metadata.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_images.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_instance_actions.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_keypairs.py | 172
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_limits.py | 327
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_lock_server.py | 4
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_microversions.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_migrate_server.py | 6
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_migrations.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_multinic.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_networks.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_quota_classes.py | 224
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_quotas.py | 480
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_remote_consoles.py | 17
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_rescue.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_security_groups.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_actions.py | 41
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_diagnostics.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_external_events.py | 5
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_group_quotas.py | 108
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_groups.py | 100
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_metadata.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_migrations.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_password.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_reset_state.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_start_stop.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_tags.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_server_topology.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_servers.py | 41
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_services.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_shelve.py | 258
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_snapshots.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_suspend_server.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_tenant_networks.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/compute/test_volumes.py | 12
-rw-r--r-- nova/tests/unit/api/openstack/fakes.py | 14
-rw-r--r-- nova/tests/unit/api/openstack/test_common.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/test_faults.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/test_requestlog.py | 2
-rw-r--r-- nova/tests/unit/api/openstack/test_wsgi.py | 3
-rw-r--r-- nova/tests/unit/api/openstack/test_wsgi_app.py | 17
-rw-r--r-- nova/tests/unit/api/test_auth.py | 3
-rw-r--r-- nova/tests/unit/api/test_wsgi.py | 2
-rw-r--r-- nova/tests/unit/api/validation/extra_specs/test_validators.py | 11
-rw-r--r-- nova/tests/unit/cmd/test_baseproxy.py | 2
-rw-r--r-- nova/tests/unit/cmd/test_common.py | 2
-rw-r--r-- nova/tests/unit/cmd/test_compute.py | 2
-rw-r--r-- nova/tests/unit/cmd/test_manage.py | 318
-rw-r--r-- nova/tests/unit/cmd/test_nova_api.py | 2
-rw-r--r-- nova/tests/unit/cmd/test_policy.py | 19
-rw-r--r-- nova/tests/unit/cmd/test_scheduler.py | 2
-rw-r--r-- nova/tests/unit/cmd/test_status.py | 62
-rw-r--r-- nova/tests/unit/compute/monitors/cpu/test_virt_driver.py | 2
-rw-r--r-- nova/tests/unit/compute/monitors/test_monitors.py | 2
-rw-r--r-- nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml | 4
-rw-r--r-- nova/tests/unit/compute/test_api.py | 622
-rw-r--r-- nova/tests/unit/compute/test_claims.py | 8
-rw-r--r-- nova/tests/unit/compute/test_compute.py | 346
-rw-r--r-- nova/tests/unit/compute/test_compute_mgr.py | 908
-rw-r--r-- nova/tests/unit/compute/test_flavors.py | 2
-rw-r--r-- nova/tests/unit/compute/test_host_api.py | 3
-rw-r--r-- nova/tests/unit/compute/test_instance_list.py | 3
-rw-r--r-- nova/tests/unit/compute/test_keypairs.py | 111
-rw-r--r-- nova/tests/unit/compute/test_multi_cell_list.py | 3
-rw-r--r-- nova/tests/unit/compute/test_pci_placement_translator.py | 291
-rw-r--r-- nova/tests/unit/compute/test_provider_config.py | 14
-rw-r--r-- nova/tests/unit/compute/test_resource_tracker.py | 543
-rw-r--r-- nova/tests/unit/compute/test_rpcapi.py | 88
-rw-r--r-- nova/tests/unit/compute/test_shelve.py | 644
-rw-r--r-- nova/tests/unit/compute/test_utils.py | 70
-rw-r--r-- nova/tests/unit/compute/test_virtapi.py | 158
-rw-r--r-- nova/tests/unit/conductor/tasks/test_base.py | 2
-rw-r--r-- nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py | 6
-rw-r--r-- nova/tests/unit/conductor/tasks/test_live_migrate.py | 41
-rw-r--r-- nova/tests/unit/conductor/tasks/test_migrate.py | 3
-rw-r--r-- nova/tests/unit/conductor/test_conductor.py | 176
-rw-r--r-- nova/tests/unit/console/rfb/test_auth.py | 2
-rw-r--r-- nova/tests/unit/console/rfb/test_authnone.py | 2
-rw-r--r-- nova/tests/unit/console/rfb/test_authvencrypt.py | 2
-rw-r--r-- nova/tests/unit/console/securityproxy/test_rfb.py | 2
-rw-r--r-- nova/tests/unit/console/test_serial.py | 3
-rw-r--r-- nova/tests/unit/console/test_websocketproxy.py | 67
-rw-r--r-- nova/tests/unit/db/api/test_api.py | 2
-rw-r--r-- nova/tests/unit/db/api/test_migrations.py | 47
-rw-r--r-- nova/tests/unit/db/main/test_api.py | 557
-rw-r--r-- nova/tests/unit/db/main/test_migrations.py | 89
-rw-r--r-- nova/tests/unit/db/test_migration.py | 195
-rw-r--r-- nova/tests/unit/fake_policy.py | 1
-rw-r--r-- nova/tests/unit/fixtures/test_libvirt.py | 3
-rw-r--r-- nova/tests/unit/image/test_glance.py | 2
-rw-r--r-- nova/tests/unit/limit/__init__.py (renamed from nova/tests/unit/virt/powervm/disk/__init__.py) | 0
-rw-r--r-- nova/tests/unit/limit/test_local.py | 256
-rw-r--r-- nova/tests/unit/limit/test_placement.py | 353
-rw-r--r-- nova/tests/unit/network/test_network_info.py | 46
-rw-r--r-- nova/tests/unit/network/test_neutron.py | 1072
-rw-r--r-- nova/tests/unit/network/test_os_vif_util.py | 33
-rw-r--r-- nova/tests/unit/network/test_security_group.py | 3
-rw-r--r-- nova/tests/unit/notifications/objects/test_flavor.py | 2
-rw-r--r-- nova/tests/unit/notifications/objects/test_instance.py | 3
-rw-r--r-- nova/tests/unit/notifications/objects/test_notification.py | 4
-rw-r--r-- nova/tests/unit/notifications/objects/test_service.py | 2
-rw-r--r-- nova/tests/unit/notifications/test_base.py | 2
-rw-r--r-- nova/tests/unit/objects/test_aggregate.py | 3
-rw-r--r-- nova/tests/unit/objects/test_block_device.py | 16
-rw-r--r-- nova/tests/unit/objects/test_build_request.py | 3
-rw-r--r-- nova/tests/unit/objects/test_cell_mapping.py | 3
-rw-r--r-- nova/tests/unit/objects/test_compute_node.py | 31
-rw-r--r-- nova/tests/unit/objects/test_console_auth_token.py | 2
-rw-r--r-- nova/tests/unit/objects/test_ec2.py | 3
-rw-r--r-- nova/tests/unit/objects/test_external_event.py | 2
-rw-r--r-- nova/tests/unit/objects/test_fields.py | 6
-rw-r--r-- nova/tests/unit/objects/test_flavor.py | 2
-rw-r--r-- nova/tests/unit/objects/test_host_mapping.py | 3
-rw-r--r-- nova/tests/unit/objects/test_image_meta.py | 87
-rw-r--r-- nova/tests/unit/objects/test_instance.py | 178
-rw-r--r-- nova/tests/unit/objects/test_instance_action.py | 2
-rw-r--r-- nova/tests/unit/objects/test_instance_device_metadata.py | 3
-rw-r--r-- nova/tests/unit/objects/test_instance_fault.py | 3
-rw-r--r-- nova/tests/unit/objects/test_instance_group.py | 4
-rw-r--r-- nova/tests/unit/objects/test_instance_info_cache.py | 27
-rw-r--r-- nova/tests/unit/objects/test_instance_mapping.py | 3
-rw-r--r-- nova/tests/unit/objects/test_instance_numa.py | 3
-rw-r--r-- nova/tests/unit/objects/test_instance_pci_requests.py | 20
-rw-r--r-- nova/tests/unit/objects/test_keypair.py | 3
-rw-r--r-- nova/tests/unit/objects/test_migrate_data.py | 65
-rw-r--r-- nova/tests/unit/objects/test_migration.py | 3
-rw-r--r-- nova/tests/unit/objects/test_migration_context.py | 3
-rw-r--r-- nova/tests/unit/objects/test_objects.py | 28
-rw-r--r-- nova/tests/unit/objects/test_pci_device.py | 22
-rw-r--r-- nova/tests/unit/objects/test_quotas.py | 2
-rw-r--r-- nova/tests/unit/objects/test_request_spec.py | 287
-rw-r--r-- nova/tests/unit/objects/test_resource.py | 3
-rw-r--r-- nova/tests/unit/objects/test_security_group.py | 3
-rw-r--r-- nova/tests/unit/objects/test_service.py | 3
-rw-r--r-- nova/tests/unit/objects/test_tag.py | 2
-rw-r--r-- nova/tests/unit/objects/test_task_log.py | 2
-rw-r--r-- nova/tests/unit/objects/test_trusted_certs.py | 2
-rw-r--r-- nova/tests/unit/objects/test_virtual_interface.py | 3
-rw-r--r-- nova/tests/unit/objects/test_volume_usage.py | 3
-rw-r--r-- nova/tests/unit/pci/fakes.py | 2
-rw-r--r-- nova/tests/unit/pci/test_devspec.py | 277
-rw-r--r-- nova/tests/unit/pci/test_manager.py | 351
-rw-r--r-- nova/tests/unit/pci/test_request.py | 22
-rw-r--r-- nova/tests/unit/pci/test_stats.py | 1234
-rw-r--r-- nova/tests/unit/pci/test_utils.py | 183
-rw-r--r-- nova/tests/unit/policies/base.py | 165
-rw-r--r-- nova/tests/unit/policies/test_admin_actions.py | 75
-rw-r--r-- nova/tests/unit/policies/test_admin_password.py | 77
-rw-r--r-- nova/tests/unit/policies/test_aggregates.py | 157
-rw-r--r-- nova/tests/unit/policies/test_assisted_volume_snapshots.py | 68
-rw-r--r-- nova/tests/unit/policies/test_attach_interfaces.py | 158
-rw-r--r-- nova/tests/unit/policies/test_availability_zone.py | 85
-rw-r--r-- nova/tests/unit/policies/test_baremetal_nodes.py | 79
-rw-r--r-- nova/tests/unit/policies/test_console_auth_tokens.py | 61
-rw-r--r-- nova/tests/unit/policies/test_console_output.py | 73
-rw-r--r-- nova/tests/unit/policies/test_create_backup.py | 77
-rw-r--r-- nova/tests/unit/policies/test_deferred_delete.py | 95
-rw-r--r-- nova/tests/unit/policies/test_evacuate.py | 63
-rw-r--r-- nova/tests/unit/policies/test_extensions.py | 13
-rw-r--r-- nova/tests/unit/policies/test_flavor_access.py | 165
-rw-r--r-- nova/tests/unit/policies/test_flavor_extra_specs.py | 329
-rw-r--r-- nova/tests/unit/policies/test_flavor_manage.py | 70
-rw-r--r-- nova/tests/unit/policies/test_floating_ip_pools.py | 12
-rw-r--r-- nova/tests/unit/policies/test_floating_ips.py | 194
-rw-r--r-- nova/tests/unit/policies/test_hosts.py | 160
-rw-r--r-- nova/tests/unit/policies/test_hypervisors.py | 130
-rw-r--r-- nova/tests/unit/policies/test_instance_actions.py | 141
-rw-r--r-- nova/tests/unit/policies/test_instance_usage_audit_log.py | 78
-rw-r--r-- nova/tests/unit/policies/test_keypairs.py | 159
-rw-r--r-- nova/tests/unit/policies/test_limits.py | 113
-rw-r--r-- nova/tests/unit/policies/test_lock_server.py | 163
-rw-r--r-- nova/tests/unit/policies/test_migrate_server.py | 92
-rw-r--r-- nova/tests/unit/policies/test_migrations.py | 55
-rw-r--r-- nova/tests/unit/policies/test_multinic.py | 91
-rw-r--r-- nova/tests/unit/policies/test_networks.py | 71
-rw-r--r-- nova/tests/unit/policies/test_pause_server.py | 86
-rw-r--r-- nova/tests/unit/policies/test_quota_class_sets.py | 104
-rw-r--r-- nova/tests/unit/policies/test_quota_sets.py | 205
-rw-r--r-- nova/tests/unit/policies/test_remote_consoles.py | 73
-rw-r--r-- nova/tests/unit/policies/test_rescue.py | 94
-rw-r--r-- nova/tests/unit/policies/test_security_groups.py | 336
-rw-r--r-- nova/tests/unit/policies/test_server_diagnostics.py | 77
-rw-r--r-- nova/tests/unit/policies/test_server_external_events.py | 52
-rw-r--r-- nova/tests/unit/policies/test_server_groups.py | 277
-rw-r--r-- nova/tests/unit/policies/test_server_ips.py | 77
-rw-r--r-- nova/tests/unit/policies/test_server_metadata.py | 155
-rw-r--r-- nova/tests/unit/policies/test_server_migrations.py | 150
-rw-r--r-- nova/tests/unit/policies/test_server_password.py | 129
-rw-r--r-- nova/tests/unit/policies/test_server_tags.py | 150
-rw-r--r-- nova/tests/unit/policies/test_server_topology.py | 103
-rw-r--r-- nova/tests/unit/policies/test_servers.py | 864
-rw-r--r-- nova/tests/unit/policies/test_services.py | 171
-rw-r--r-- nova/tests/unit/policies/test_shelve.py | 128
-rw-r--r-- nova/tests/unit/policies/test_simple_tenant_usage.py | 101
-rw-r--r-- nova/tests/unit/policies/test_suspend_server.py | 89
-rw-r--r-- nova/tests/unit/policies/test_tenant_networks.py | 80
-rw-r--r-- nova/tests/unit/policies/test_volumes.py | 436
-rw-r--r-- nova/tests/unit/privsep/test_fs.py | 2
-rw-r--r-- nova/tests/unit/privsep/test_idmapshift.py | 2
-rw-r--r-- nova/tests/unit/privsep/test_libvirt.py | 3
-rw-r--r-- nova/tests/unit/privsep/test_linux_net.py | 2
-rw-r--r-- nova/tests/unit/privsep/test_path.py | 3
-rw-r--r-- nova/tests/unit/privsep/test_qemu.py | 2
-rw-r--r-- nova/tests/unit/privsep/test_utils.py | 2
-rw-r--r-- nova/tests/unit/scheduler/client/test_query.py | 3
-rw-r--r-- nova/tests/unit/scheduler/client/test_report.py | 96
-rw-r--r-- nova/tests/unit/scheduler/fakes.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_affinity_filters.py | 3
-rw-r--r-- nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_availability_zone_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_compute_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_io_ops_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_num_instances_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/filters/test_numa_topology_filters.py | 97
-rw-r--r-- nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py | 115
-rw-r--r-- nova/tests/unit/scheduler/filters/test_type_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/test_filters.py | 2
-rw-r--r-- nova/tests/unit/scheduler/test_host_manager.py | 14
-rw-r--r-- nova/tests/unit/scheduler/test_manager.py | 910
-rw-r--r-- nova/tests/unit/scheduler/test_request_filter.py | 125
-rw-r--r-- nova/tests/unit/scheduler/test_rpcapi.py | 3
-rw-r--r-- nova/tests/unit/scheduler/test_utils.py | 3
-rw-r--r-- nova/tests/unit/scheduler/weights/test_weights_affinity.py | 2
-rw-r--r-- nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py | 97
-rw-r--r-- nova/tests/unit/scheduler/weights/test_weights_metrics.py | 2
-rw-r--r-- nova/tests/unit/scheduler/weights/test_weights_pci.py | 2
-rw-r--r-- nova/tests/unit/servicegroup/test_api.py | 2
-rw-r--r-- nova/tests/unit/servicegroup/test_db_servicegroup.py | 3
-rw-r--r-- nova/tests/unit/servicegroup/test_mc_servicegroup.py | 2
-rw-r--r-- nova/tests/unit/storage/test_rbd.py | 4
-rw-r--r-- nova/tests/unit/test_availability_zones.py | 3
-rw-r--r-- nova/tests/unit/test_block_device.py | 3
-rw-r--r-- nova/tests/unit/test_cache.py | 2
-rw-r--r-- nova/tests/unit/test_cinder.py | 2
-rw-r--r-- nova/tests/unit/test_conf.py | 2
-rw-r--r-- nova/tests/unit/test_configdrive2.py | 2
-rw-r--r-- nova/tests/unit/test_context.py | 4
-rw-r--r-- nova/tests/unit/test_crypto.py | 2
-rw-r--r-- nova/tests/unit/test_exception_wrapper.py | 2
-rw-r--r-- nova/tests/unit/test_filesystem.py | 52
-rw-r--r-- nova/tests/unit/test_fixtures.py | 16
-rw-r--r-- nova/tests/unit/test_hacking.py | 46
-rw-r--r-- nova/tests/unit/test_identity.py | 4
-rw-r--r-- nova/tests/unit/test_json_ref.py | 2
-rw-r--r-- nova/tests/unit/test_metadata.py | 27
-rw-r--r-- nova/tests/unit/test_notifications.py | 4
-rw-r--r-- nova/tests/unit/test_notifier.py | 2
-rw-r--r-- nova/tests/unit/test_policy.py | 92
-rw-r--r-- nova/tests/unit/test_quota.py | 266
-rw-r--r-- nova/tests/unit/test_rpc.py | 47
-rw-r--r-- nova/tests/unit/test_service.py | 14
-rw-r--r-- nova/tests/unit/test_service_auth.py | 3
-rw-r--r-- nova/tests/unit/test_test.py | 17
-rw-r--r-- nova/tests/unit/test_utils.py | 2
-rw-r--r-- nova/tests/unit/test_weights.py | 2
-rw-r--r-- nova/tests/unit/test_wsgi.py | 2
-rw-r--r-- nova/tests/unit/utils.py | 3
-rw-r--r-- nova/tests/unit/virt/disk/mount/test_api.py | 5
-rw-r--r-- nova/tests/unit/virt/disk/mount/test_loop.py | 3
-rw-r--r-- nova/tests/unit/virt/disk/mount/test_nbd.py | 2
-rw-r--r-- nova/tests/unit/virt/disk/test_api.py | 3
-rw-r--r-- nova/tests/unit/virt/disk/vfs/test_guestfs.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/__init__.py | 20
-rw-r--r-- nova/tests/unit/virt/hyperv/test_base.py | 3
-rw-r--r-- nova/tests/unit/virt/hyperv/test_block_device_manager.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_driver.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_eventhandler.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_hostops.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_imagecache.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_livemigrationops.py | 3
-rw-r--r-- nova/tests/unit/virt/hyperv/test_migrationops.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_pathutils.py | 3
-rw-r--r-- nova/tests/unit/virt/hyperv/test_rdpconsoleops.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_serialconsolehandler.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_serialconsoleops.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_serialproxy.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_snapshotops.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_vif.py | 2
-rw-r--r-- nova/tests/unit/virt/hyperv/test_vmops.py | 21
-rw-r--r-- nova/tests/unit/virt/hyperv/test_volumeops.py | 3
-rw-r--r-- nova/tests/unit/virt/ironic/test_client_wrapper.py | 3
-rw-r--r-- nova/tests/unit/virt/ironic/test_driver.py | 76
-rw-r--r-- nova/tests/unit/virt/libvirt/cpu/__init__.py (renamed from nova/tests/unit/virt/powervm/tasks/__init__.py) | 0
-rw-r--r-- nova/tests/unit/virt/libvirt/cpu/test_api.py | 194
-rw-r--r-- nova/tests/unit/virt/libvirt/cpu/test_core.py | 122
-rw-r--r-- nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py | 3
-rw-r--r-- nova/tests/unit/virt/libvirt/storage/test_lvm.py | 3
-rw-r--r-- nova/tests/unit/virt/libvirt/test_blockinfo.py | 102
-rw-r--r-- nova/tests/unit/virt/libvirt/test_config.py | 217
-rw-r--r-- nova/tests/unit/virt/libvirt/test_designer.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/test_driver.py | 1862
-rw-r--r-- nova/tests/unit/virt/libvirt/test_guest.py | 39
-rw-r--r-- nova/tests/unit/virt/libvirt/test_host.py | 320
-rw-r--r-- nova/tests/unit/virt/libvirt/test_imagebackend.py | 38
-rw-r--r-- nova/tests/unit/virt/libvirt/test_imagecache.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/test_machine_type_utils.py | 3
-rw-r--r-- nova/tests/unit/virt/libvirt/test_migration.py | 44
-rw-r--r-- nova/tests/unit/virt/libvirt/test_utils.py | 113
-rw-r--r-- nova/tests/unit/virt/libvirt/test_vif.py | 41
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_fs.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_iscsi.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_lightos.py | 79
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_mount.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_net.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_nfs.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_nvme.py | 35
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_quobyte.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_remotefs.py | 3
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_scaleio.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_smbfs.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_storpool.py | 2
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_volume.py | 3
-rw-r--r-- nova/tests/unit/virt/libvirt/volume/test_vzstorage.py | 2
-rw-r--r-- nova/tests/unit/virt/powervm/__init__.py | 56
-rw-r--r-- nova/tests/unit/virt/powervm/disk/fake_adapter.py | 52
-rw-r--r-- nova/tests/unit/virt/powervm/disk/test_driver.py | 59
-rw-r--r-- nova/tests/unit/virt/powervm/disk/test_localdisk.py | 312
-rw-r--r-- nova/tests/unit/virt/powervm/disk/test_ssp.py | 424
-rw-r--r-- nova/tests/unit/virt/powervm/tasks/test_image.py | 68
-rw-r--r-- nova/tests/unit/virt/powervm/tasks/test_network.py | 323
-rw-r--r-- nova/tests/unit/virt/powervm/tasks/test_storage.py | 354
-rw-r--r-- nova/tests/unit/virt/powervm/tasks/test_vm.py | 134
-rw-r--r-- nova/tests/unit/virt/powervm/test_driver.py | 649
-rw-r--r-- nova/tests/unit/virt/powervm/test_host.py | 62
-rw-r--r-- nova/tests/unit/virt/powervm/test_image.py | 55
-rw-r--r-- nova/tests/unit/virt/powervm/test_media.py | 203
-rw-r--r-- nova/tests/unit/virt/powervm/test_mgmt.py | 193
-rw-r--r-- nova/tests/unit/virt/powervm/test_vif.py | 327
-rw-r--r-- nova/tests/unit/virt/powervm/test_vm.py | 563
-rw-r--r-- nova/tests/unit/virt/powervm/volume/__init__.py | 0
-rw-r--r-- nova/tests/unit/virt/powervm/volume/test_fcvscsi.py | 456
-rw-r--r-- nova/tests/unit/virt/test_block_device.py | 319
-rw-r--r-- nova/tests/unit/virt/test_hardware.py | 588
-rw-r--r-- nova/tests/unit/virt/test_imagecache.py | 5
-rw-r--r-- nova/tests/unit/virt/test_images.py | 48
-rw-r--r-- nova/tests/unit/virt/test_netutils.py | 23
-rw-r--r-- nova/tests/unit/virt/test_node.py | 142
-rw-r--r-- nova/tests/unit/virt/test_osinfo.py | 3
-rw-r--r-- nova/tests/unit/virt/test_virt.py | 29
-rw-r--r-- nova/tests/unit/virt/test_virt_drivers.py | 17
-rw-r--r-- nova/tests/unit/virt/vmwareapi/__init__.py | 20
-rw-r--r-- nova/tests/unit/virt/vmwareapi/fake.py | 203
-rw-r--r-- nova/tests/unit/virt/vmwareapi/stubs.py | 7
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_configdrive.py | 3
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_driver_api.py | 65
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_ds_util.py | 2
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_imagecache.py | 2
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_images.py | 10
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_network_util.py | 10
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_session.py | 208
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_vif.py | 3
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_vim_util.py | 6
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_vm_util.py | 91
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_vmops.py | 47
-rw-r--r-- nova/tests/unit/virt/vmwareapi/test_volumeops.py | 192
-rw-r--r-- nova/tests/unit/virt/zvm/__init__.py | 20
-rw-r--r-- nova/tests/unit/virt/zvm/test_driver.py | 3
-rw-r--r-- nova/tests/unit/virt/zvm/test_guest.py | 2
-rw-r--r-- nova/tests/unit/virt/zvm/test_hypervisor.py | 2
-rw-r--r-- nova/tests/unit/virt/zvm/test_utils.py | 2
533 files changed, 30298 insertions, 12387 deletions
diff --git a/nova/tests/fixtures/__init__.py b/nova/tests/fixtures/__init__.py
index df254608fd..9ff4a2a601 100644
--- a/nova/tests/fixtures/__init__.py
+++ b/nova/tests/fixtures/__init__.py
@@ -16,6 +16,8 @@ from .cast_as_call import CastAsCallFixture # noqa: F401
from .cinder import CinderFixture # noqa: F401
from .conf import ConfFixture # noqa: F401, F403
from .cyborg import CyborgFixture # noqa: F401
+from .filesystem import SysFileSystemFixture # noqa: F401
+from .filesystem import TempFileSystemFixture # noqa: F401
from .glance import GlanceFixture # noqa: F401
from .libvirt import LibvirtFixture # noqa: F401
from .libvirt_imagebackend import LibvirtImageBackendFixture # noqa: F401
diff --git a/nova/tests/fixtures/cinder.py b/nova/tests/fixtures/cinder.py
index 97b32d9b84..025a3d8b81 100644
--- a/nova/tests/fixtures/cinder.py
+++ b/nova/tests/fixtures/cinder.py
@@ -47,6 +47,13 @@ class CinderFixture(fixtures.Fixture):
# This represents a bootable image-backed volume to test
# boot-from-volume scenarios.
IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
+
+    # This represents a bootable image-backed volume to test
+    # boot-from-volume scenarios with the os_require_quiesce and
+    # hw_qemu_guest_agent image properties set.
+ IMAGE_BACKED_VOL_QUIESCE = '6ca404f3-d844-4169-bb96-bc792f37de26'
+
# This represents a bootable image-backed volume with required traits
# as part of volume image metadata
IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'
@@ -157,6 +164,13 @@ class CinderFixture(fixtures.Fixture):
'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
}
+ if volume_id == self.IMAGE_BACKED_VOL_QUIESCE:
+ volume['bootable'] = True
+ volume['volume_image_metadata'] = {
+ "os_require_quiesce": "True",
+ "hw_qemu_guest_agent": "True"
+ }
+
if volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL:
volume['bootable'] = True
volume['volume_image_metadata'] = {
@@ -327,6 +341,16 @@ class CinderFixture(fixtures.Fixture):
_find_attachment(attachment_id)
LOG.info('Completing volume attachment: %s', attachment_id)
+ def fake_reimage_volume(*args, **kwargs):
+ if self.IMAGE_BACKED_VOL not in args:
+ raise exception.VolumeNotFound()
+ if 'reimage_reserved' not in kwargs:
+ raise exception.InvalidInput('reimage_reserved not specified')
+
+ def fake_get_absolute_limits(_self, context):
+ limits = {'totalSnapshotsUsed': 0, 'maxTotalSnapshots': -1}
+ return limits
+
self.test.stub_out(
'nova.volume.cinder.API.attachment_create', fake_attachment_create)
self.test.stub_out(
@@ -366,6 +390,12 @@ class CinderFixture(fixtures.Fixture):
self.test.stub_out(
'nova.volume.cinder.API.terminate_connection',
lambda *args, **kwargs: None)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.reimage_volume',
+ fake_reimage_volume)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.get_absolute_limits',
+ fake_get_absolute_limits)
def volume_ids_for_instance(self, instance_uuid):
for volume_id, attachments in self.volume_to_attachment.items():
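
A minimal sketch of how a test might exercise these new stubs; the test class and the exact reimage_volume() call signature are assumptions for illustration, not part of this change:

# Hypothetical usage sketch; only CinderFixture and its constants come
# from the change above, everything else is assumed for illustration.
from nova import context as nova_context
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.volume import cinder as cinder_api


class CinderReimageStubTest(test.NoDBTestCase):
    def setUp(self):
        super().setUp()
        self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))

    def test_reimage_stub(self):
        ctxt = nova_context.get_admin_context()
        # The stub only recognises the known bootable volume and requires
        # the reimage_reserved kwarg, mirroring the real API contract.
        cinder_api.API().reimage_volume(
            ctxt, nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
            'new-image-id', reimage_reserved=True)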
diff --git a/nova/tests/fixtures/filesystem.py b/nova/tests/fixtures/filesystem.py
new file mode 100644
index 0000000000..932d42fe27
--- /dev/null
+++ b/nova/tests/fixtures/filesystem.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import fixtures
+
+from nova import filesystem
+from nova.virt.libvirt.cpu import core
+
+
+SYS = 'sys'
+
+
+class TempFileSystemFixture(fixtures.Fixture):
+ """Creates a fake / filesystem"""
+
+ def _setUp(self):
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
+ # NOTE(sbauza): I/O disk errors may raise an exception here, as we
+ # don't ignore them. If that's causing a problem in our CI jobs, the
+ # recommended solution is to use shutil.rmtree instead of cleanup()
+ # with ignore_errors parameter set to True (or wait for the minimum
+ # python version to be 3.10 as TemporaryDirectory will provide
+ # ignore_cleanup_errors parameter)
+ self.addCleanup(self.temp_dir.cleanup)
+
+
+class SysFileSystemFixture(TempFileSystemFixture):
+ """Creates a fake /sys filesystem"""
+
+ def __init__(self, cpus_supported=None):
+ self.cpus_supported = cpus_supported or 10
+
+ def _setUp(self):
+ super()._setUp()
+ self.sys_path = os.path.join(self.temp_dir.name, SYS)
+ self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True)
+
+ sys_patcher = mock.patch(
+ 'nova.filesystem.SYS',
+ new_callable=mock.PropertyMock(return_value=self.sys_path))
+ self.sys_mock = sys_patcher.start()
+ self.addCleanup(sys_patcher.stop)
+
+ avail_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/present')))
+ self.avail_path_mock = avail_path_patcher.start()
+ self.addCleanup(avail_path_patcher.stop)
+
+ cpu_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/cpu%(core)s')))
+ self.cpu_path_mock = cpu_path_patcher.start()
+ self.addCleanup(cpu_path_patcher.stop)
+
+ for cpu_nr in range(self.cpus_supported):
+ cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
+ os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+ filesystem.write_sys(
+ os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+ data='powersave')
+ filesystem.write_sys(core.AVAILABLE_PATH,
+ f'0-{self.cpus_supported - 1}')
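
A minimal usage sketch for the new fixture; the test class is illustrative, and it assumes nova.filesystem also exposes a read_sys() counterpart to the write_sys() call used above:

from nova import filesystem
from nova import test
from nova.tests import fixtures as nova_fixtures


class SysFixtureUsageTest(test.NoDBTestCase):
    def test_governor_defaults_to_powersave(self):
        self.useFixture(
            nova_fixtures.SysFileSystemFixture(cpus_supported=4))
        # The fixture pre-creates cpu0..cpu3 under the fake /sys, so this
        # read is served from the temporary directory, not the real sysfs.
        self.assertEqual(
            'powersave',
            filesystem.read_sys(
                'devices/system/cpu/cpu0/cpufreq/scaling_governor'))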
diff --git a/nova/tests/fixtures/glance.py b/nova/tests/fixtures/glance.py
index cf68f490b4..b718f28c2a 100644
--- a/nova/tests/fixtures/glance.py
+++ b/nova/tests/fixtures/glance.py
@@ -15,6 +15,7 @@ import datetime
import fixtures
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from nova import exception
@@ -198,6 +199,32 @@ class GlanceFixture(fixtures.Fixture):
},
}
+ eph_encryption = copy.deepcopy(image1)
+ eph_encryption['id'] = uuidsentinel.eph_encryption
+ eph_encryption['properties'] = {
+ 'hw_ephemeral_encryption': 'True'
+ }
+
+ eph_encryption_disabled = copy.deepcopy(image1)
+ eph_encryption_disabled['id'] = uuidsentinel.eph_encryption_disabled
+ eph_encryption_disabled['properties'] = {
+ 'hw_ephemeral_encryption': 'False'
+ }
+
+ eph_encryption_luks = copy.deepcopy(image1)
+ eph_encryption_luks['id'] = uuidsentinel.eph_encryption_luks
+ eph_encryption_luks['properties'] = {
+ 'hw_ephemeral_encryption': 'True',
+ 'hw_ephemeral_encryption_format': 'luks'
+ }
+
+ eph_encryption_plain = copy.deepcopy(image1)
+ eph_encryption_plain['id'] = uuidsentinel.eph_encryption_plain
+ eph_encryption_plain['properties'] = {
+ 'hw_ephemeral_encryption': 'True',
+ 'hw_ephemeral_encryption_format': 'plain'
+ }
+
def __init__(self, test):
super().__init__()
self.test = test
@@ -222,6 +249,10 @@ class GlanceFixture(fixtures.Fixture):
self.create(None, self.image5)
self.create(None, self.auto_disk_config_disabled_image)
self.create(None, self.auto_disk_config_enabled_image)
+ self.create(None, self.eph_encryption)
+ self.create(None, self.eph_encryption_disabled)
+ self.create(None, self.eph_encryption_luks)
+ self.create(None, self.eph_encryption_plain)
self._imagedata = {}
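
Because these images are now registered by default, functional tests can request them by their uuidsentinel IDs. A rough sketch, assuming a test class whose _create_server() helper (from integrated_helpers) accepts an image_uuid:

from oslo_utils.fixture import uuidsentinel


def boot_with_luks_ephemeral(test_case):
    # GlanceFixture registered this image with hw_ephemeral_encryption
    # set to True and hw_ephemeral_encryption_format set to luks.
    return test_case._create_server(
        image_uuid=uuidsentinel.eph_encryption_luks)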
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 0faf9eb4c5..4f48463118 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -18,10 +18,10 @@ import sys
import textwrap
import time
import typing as ty
+from unittest import mock
import fixtures
from lxml import etree
-import mock
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
@@ -31,6 +31,7 @@ from nova.objects import fields as obj_fields
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
+from nova.virt.libvirt import host
# Allow passing None to the various connect methods
@@ -275,6 +276,7 @@ class FakePCIDevice(object):
<product id='0x%(prod_id)s'>%(prod_name)s</product>
<vendor id='0x%(vend_id)s'>%(vend_name)s</vendor>
%(capability)s
+ %(vpd_capability)s
<iommuGroup number='%(iommu_group)d'>
<address domain='0x0000' bus='%(bus)#02x' slot='%(slot)#02x' function='0x%(function)d'/>
</iommuGroup>
@@ -293,13 +295,22 @@ class FakePCIDevice(object):
<availableInstances>%(instances)s</availableInstances>
</type>""".strip()) # noqa
+ vpd_cap_templ = textwrap.dedent("""
+ <capability type='vpd'>
+ <name>%(name)s</name>
+ %(fields)s
+ </capability>""".strip())
+ vpd_fields_templ = textwrap.dedent("""
+ <fields access='%(access)s'>%(section_fields)s</fields>""".strip())
+ vpd_field_templ = """<%(field_name)s>%(field_value)s</%(field_name)s>"""
+
is_capable_of_mdevs = False
def __init__(
self, dev_type, bus, slot, function, iommu_group, numa_node, *,
vf_ratio=None, multiple_gpu_types=False, generic_types=False,
parent=None, vend_id=None, vend_name=None, prod_id=None,
- prod_name=None, driver_name=None,
+ prod_name=None, driver_name=None, vpd_fields=None, mac_address=None,
):
"""Populate pci devices
@@ -321,6 +332,8 @@ class FakePCIDevice(object):
:param prod_id: (str) The product ID.
:param prod_name: (str) The product name.
:param driver_name: (str) The driver name.
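+        :param vpd_fields: (dict) The VPD fields of the device: a 'name'
+            entry plus optional 'readonly'/'readwrite' field dicts, as
+            consumed by format_vpd_cap() below.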
+        :param mac_address: (str) The MAC address of the device. Used for
+            SR-IOV PFs.
"""
self.dev_type = dev_type
@@ -339,6 +352,9 @@ class FakePCIDevice(object):
self.prod_id = prod_id
self.prod_name = prod_name
self.driver_name = driver_name
+ self.mac_address = mac_address
+
+ self.vpd_fields = vpd_fields
self.generate_xml()
@@ -352,7 +368,9 @@ class FakePCIDevice(object):
assert not self.vf_ratio, 'vf_ratio does not apply for PCI devices'
if self.dev_type in ('PF', 'VF'):
- assert self.vf_ratio, 'require vf_ratio for PFs and VFs'
+ assert (
+ self.vf_ratio is not None
+ ), 'require vf_ratio for PFs and VFs'
if self.dev_type == 'VF':
assert self.parent, 'require parent for VFs'
@@ -447,6 +465,7 @@ class FakePCIDevice(object):
'prod_name': prod_name,
'driver': driver,
'capability': capability,
+ 'vpd_capability': self.format_vpd_cap(),
'iommu_group': self.iommu_group,
'numa_node': self.numa_node,
'parent': parent,
@@ -457,9 +476,37 @@ class FakePCIDevice(object):
if self.numa_node == -1:
self.pci_device = self.pci_device.replace("<numa node='-1'/>", "")
+ def format_vpd_cap(self):
+ if not self.vpd_fields:
+ return ''
+ fields = []
+ for access_type in ('readonly', 'readwrite'):
+ section_fields = []
+ for field_name, field_value in self.vpd_fields.get(
+ access_type, {}).items():
+ section_fields.append(self.vpd_field_templ % {
+ 'field_name': field_name,
+ 'field_value': field_value,
+ })
+ if section_fields:
+ fields.append(
+ self.vpd_fields_templ % {
+ 'access': access_type,
+ 'section_fields': '\n'.join(section_fields),
+ }
+ )
+ return self.vpd_cap_templ % {
+ 'name': self.vpd_fields.get('name', ''),
+ 'fields': '\n'.join(fields)
+ }
+
def XMLDesc(self, flags):
return self.pci_device
+ @property
+ def address(self):
+ return "0000:%02x:%02x.%1x" % (self.bus, self.slot, self.function)
+
# TODO(stephenfin): Remove all of these HostFooDevicesInfo objects in favour of
# a unified devices object
@@ -487,7 +534,7 @@ class HostPCIDevicesInfo(object):
"""
self.devices = {}
- if not (num_vfs or num_pfs) and not num_mdevcap:
+ if not (num_vfs or num_pfs or num_pci) and not num_mdevcap:
return
if num_vfs and not num_pfs:
@@ -572,7 +619,7 @@ class HostPCIDevicesInfo(object):
self, dev_type, bus, slot, function, iommu_group, numa_node,
vf_ratio=None, multiple_gpu_types=False, generic_types=False,
parent=None, vend_id=None, vend_name=None, prod_id=None,
- prod_name=None, driver_name=None,
+ prod_name=None, driver_name=None, vpd_fields=None, mac_address=None,
):
pci_dev_name = _get_libvirt_nodedev_name(bus, slot, function)
@@ -593,7 +640,10 @@ class HostPCIDevicesInfo(object):
vend_name=vend_name,
prod_id=prod_id,
prod_name=prod_name,
- driver_name=driver_name)
+ driver_name=driver_name,
+ vpd_fields=vpd_fields,
+ mac_address=mac_address,
+ )
self.devices[pci_dev_name] = dev
return dev
@@ -612,6 +662,13 @@ class HostPCIDevicesInfo(object):
return [dev for dev in self.devices
if self.devices[dev].is_capable_of_mdevs]
+ def get_pci_address_mac_mapping(self):
+ return {
+ device.address: device.mac_address
+ for dev_addr, device in self.devices.items()
+ if device.mac_address
+ }
+
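
A short sketch of how the new hooks fit together; all concrete device values here are invented for illustration:

# Build a PF that carries VPD data and a MAC address, then feed the
# resulting address-to-MAC map into LibvirtFixture (see further below).
pci_info = HostPCIDevicesInfo(num_pci=0, num_pfs=0, num_vfs=0)
pci_info.add_device(
    dev_type='PF', bus=0x82, slot=0x0, function=0, iommu_group=65,
    numa_node=1, vf_ratio=0,
    vpd_fields={
        'name': 'BlueField-2 DPU 25GbE',
        'readonly': {'serial_number': 'MT2113X00000'},
    },
    mac_address='52:54:00:1e:59:42',
)
# Returns {'0000:82:00.0': '52:54:00:1e:59:42'}, suitable for passing to
# LibvirtFixture.update_sriov_mac_address_mapping().
addr_to_mac = pci_info.get_pci_address_mac_mapping()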
class FakeMdevDevice(object):
template = """
@@ -1377,21 +1434,31 @@ class Domain(object):
'Test attempts to add more than 8 PCI devices. This is '
'not supported by the fake libvirt implementation.')
nic['func'] = func
- # this branch covers most interface types with a source
- # such as linux bridge interfaces.
- if 'source' in nic:
+ if nic['type'] in ('ethernet',):
+ # this branch covers kernel ovs interfaces
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
- <source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>
</interface>''' % nic
- elif nic['type'] in ('ethernet',):
- # this branch covers kernel ovs interfaces
+ elif nic['type'] in ('vdpa',):
+ # this branch covers hardware offloaded ovs with vdpa
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
+ <source dev='%(source)s'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x%(func)s'/>
+ </interface>''' % nic
+ # this branch covers most interface types with a source
+ # such as linux bridge interfaces.
+ elif 'source' in nic:
+ nics += '''<interface type='%(type)s'>
+ <mac address='%(mac)s'/>
+ <source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x%(func)s'/>
</interface>''' % nic
else:
# This branch covers the macvtap vnic-type.
@@ -1977,6 +2044,12 @@ class Connection(object):
return VIR_CPU_COMPARE_IDENTICAL
+ def compareHypervisorCPU(
+ self, emulator, arch, machine, virttype,
+ xml, flags
+ ):
+ return self.compareCPU(xml, flags)
+
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000,
@@ -2141,6 +2214,15 @@ class LibvirtFixture(fixtures.Fixture):
def __init__(self, stub_os_vif=True):
self.stub_os_vif = stub_os_vif
+ self.pci_address_to_mac_map = collections.defaultdict(
+ lambda: '52:54:00:1e:59:c6')
+
+ def update_sriov_mac_address_mapping(self, pci_address_to_mac_map):
+ self.pci_address_to_mac_map.update(pci_address_to_mac_map)
+
+ def fake_get_mac_by_pci_address(self, pci_addr, pf_interface=False):
+ res = self.pci_address_to_mac_map[pci_addr]
+ return res
def setUp(self):
super().setUp()
@@ -2153,27 +2235,39 @@ class LibvirtFixture(fixtures.Fixture):
self.useFixture(
fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info'))
- self.useFixture(
- fixtures.MockPatch('nova.compute.utils.get_machine_ips'))
+ self.mock_get_machine_ips = self.useFixture(
+ fixtures.MockPatch('nova.compute.utils.get_machine_ips')).mock
# libvirt driver needs to call out to the filesystem to get the
# parent_ifname for the SRIOV VFs.
+ self.mock_get_ifname_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ "nova.pci.utils.get_ifname_by_pci_address",
+ return_value="fake_pf_interface_name",
+ )
+ ).mock
+
self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_ifname_by_pci_address',
- return_value='fake_pf_interface_name'))
+ 'nova.pci.utils.get_mac_by_pci_address',
+ side_effect=self.fake_get_mac_by_pci_address))
# libvirt calls out to sysfs to get the vfs ID during macvtap plug
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1))
+ self.mock_get_vf_num_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1
+ )
+ ).mock
# libvirt calls out to privsep to set the mac and vlan of a macvtap
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr_and_vlan'))
+ self.mock_set_device_macaddr_and_vlan = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr_and_vlan')).mock
# libvirt calls out to privsep to set the port state during macvtap
# plug
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr'))
+ self.mock_set_device_macaddr = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr')).mock
# Don't assume that the system running tests has a valid machine-id
self.useFixture(fixtures.MockPatch(
@@ -2188,8 +2282,17 @@ class LibvirtFixture(fixtures.Fixture):
# Ensure tests perform the same on all host architectures
fake_uname = os_uname(
'Linux', '', '5.4.0-0-generic', '', obj_fields.Architecture.X86_64)
- self.useFixture(
- fixtures.MockPatch('os.uname', return_value=fake_uname))
+ self.mock_uname = self.useFixture(
+ fixtures.MockPatch('os.uname', return_value=fake_uname)).mock
+
+ real_exists = os.path.exists
+
+ def fake_exists(path):
+ if path == host.SEV_KERNEL_PARAM_FILE:
+ return False
+ return real_exists(path)
+
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
# ...and on all machine types
fake_loaders = [
diff --git a/nova/tests/fixtures/libvirt_data.py b/nova/tests/fixtures/libvirt_data.py
index 463cb0ae3f..f022860f61 100644
--- a/nova/tests/fixtures/libvirt_data.py
+++ b/nova/tests/fixtures/libvirt_data.py
@@ -2002,6 +2002,210 @@ _fake_NodeDevXml = {
</capability>
</device>
""",
+ # A PF with the VPD capability.
+ "pci_0000_82_00_0": """
+ <device>
+ <name>pci_0000_82_00_0</name>
+ <path>/sys/devices/pci0000:80/0000:80:03.0/0000:82:00.0</path>
+ <parent>pci_0000_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>0</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id='0xa2d6'>MT42822 BlueField-2 integrated ConnectX-6 Dx network controller</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='virt_functions' maxCount='8'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x3'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x4'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x5'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x6'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x7'/>
+ <address domain='0x0000' bus='0x82' slot='0x01' function='0x0'/>
+ <address domain='0x0000' bus='0x82' slot='0x01' function='0x1'/>
+ <address domain='0x0000' bus='0x82' slot='0x01' function='0x2'/>
+ </capability>
+ <capability type='vpd'>
+ <name>BlueField-2 DPU 25GbE Dual-Port SFP56, Crypto Enabled, 16GB on-board DDR, 1GbE OOB management, Tall Bracket</name>
+ <fields access='readonly'>
+ <change_level>B1</change_level>
+ <manufacture_id>foobar</manufacture_id>
+ <part_number>MBF2H332A-AEEOT</part_number>
+ <serial_number>MT2113X00000</serial_number>
+ <vendor_field index='0'>PCIeGen4 x8</vendor_field>
+ <vendor_field index='2'>MBF2H332A-AEEOT</vendor_field>
+ <vendor_field index='3'>3c53d07eec484d8aab34dabd24fe575aa</vendor_field>
+ <vendor_field index='A'>MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A</vendor_field>
+ </fields>
+ <fields access='readwrite'>
+ <asset_tag>fooasset</asset_tag>
+ <vendor_field index='0'>vendorfield0</vendor_field>
+ <vendor_field index='2'>vendorfield2</vendor_field>
+ <vendor_field index='A'>vendorfieldA</vendor_field>
+ <system_field index='B'>systemfieldB</system_field>
+ <system_field index='0'>systemfield0</system_field>
+ </fields>
+ </capability>
+ <iommuGroup number='65'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x0'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' speed='8' width='8'/>
+ </pci-express>
+ </capability>
+ </device>""", # noqa:E501
+ # A VF without the VPD capability with a PF that has a VPD capability.
+ "pci_0000_82_00_3": """
+ <device>
+ <name>pci_0000_82_00_3</name>
+ <path>/sys/devices/pci0000:80/0000:80:03.0/0000:82:00.3</path>
+ <parent>pci_0000_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>0</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x101e'>ConnectX Family mlx5Gen Virtual Function</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x0'/>
+ </capability>
+ <iommuGroup number='99'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x3'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' width='0'/>
+ </pci-express>
+ </capability>
+ </device>""",
+ # A VF with the VPD capability but without a parent defined in test data
+ # so that the VPD cap is extracted from the VF directly.
+ "pci_0001_82_00_3": """
+ <device>
+ <name>pci_0001_82_00_3</name>
+ <path>/sys/devices/pci0001:80/0001:80:03.0/0001:82:00.3</path>
+ <parent>pci_0001_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>1</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x101e'>ConnectX Family mlx5Gen Virtual Function</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0001' bus='0x82' slot='0x00' function='0x0'/>
+ </capability>
+ <capability type='vpd'>
+ <name>BlueField-2 DPU 25GbE Dual-Port SFP56, Crypto Enabled, 16GB on-board DDR, 1GbE OOB management, Tall Bracket</name>
+ <fields access='readonly'>
+ <change_level>B1</change_level>
+ <part_number>MBF2H332A-AEEOT</part_number>
+ <serial_number>MT2113XBEEF0</serial_number>
+ <vendor_field index='2'>MBF2H332A-AEEOT</vendor_field>
+ <vendor_field index='3'>9644e3586190eb118000b8cef671bf3e</vendor_field>
+ <vendor_field index='A'>MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A</vendor_field>
+ <vendor_field index='0'>PCIeGen4 x8</vendor_field>
+ </fields>
+ </capability>
+ <iommuGroup number='99'>
+ <address domain='0x0001' bus='0x82' slot='0x00' function='0x3'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' width='0'/>
+ </pci-express>
+ </capability>
+ </device>""", # noqa:E501
+ # A VF without the VPD capability and without a parent PF defined
+ # in the test data.
+ "pci_0002_82_00_3": """
+ <device>
+ <name>pci_0002_82_00_3</name>
+ <path>/sys/devices/pci0002:80/0002:80:03.0/0002:82:00.3</path>
+ <parent>pci_0002_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>2</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x101e'>ConnectX Family mlx5Gen Virtual Function</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0002' bus='0x82' slot='0x00' function='0x0'/>
+ </capability>
+ <iommuGroup number='99'>
+ <address domain='0x0002' bus='0x82' slot='0x00' function='0x3'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' width='0'/>
+ </pci-express>
+ </capability>
+ </device>""",
+ "net_enp130s0f0v0_36_33_10_a3_94_64": """
+ <device>
+ <name>net_enp130s0f0v0_36_33_10_a3_94_64</name>
+ <path>/sys/devices/pci0000:80/0000:80:03.0/0000:82:00.3/net/enp130s0f0v0</path>
+ <parent>pci_0000_82_00_3</parent>
+ <capability type='net'>
+ <interface>enp130s0f0v0</interface>
+ <address>36:33:10:a3:94:64</address>
+ <link state='down'/>
+ <feature name='rx'/>
+ <feature name='tx'/>
+ <feature name='sg'/>
+ <feature name='tso'/>
+ <feature name='gso'/>
+ <feature name='gro'/>
+ <feature name='rxvlan'/>
+ <feature name='txvlan'/>
+ <feature name='rxhash'/>
+ <capability type='80203'/>
+ </capability>
+ </device>""", # noqa:E501
+ "net_enp130s0f0v0_36_33_10_a3_94_65": """
+ <device>
+ <name>net_enp130s0f0v0_36_33_10_a3_94_65</name>
+ <path>/sys/devices/pci0002:80/0002:80:03.0/0002:82:00.3/net/enp130s0f0v0</path>
+ <parent>pci_0002_82_00_3</parent>
+ <capability type='net'>
+ <interface>enp130s0f0v0</interface>
+ <address>36:33:10:a3:94:65</address>
+ <link state='down'/>
+ <feature name='rx'/>
+ <feature name='tx'/>
+ <feature name='sg'/>
+ <feature name='tso'/>
+ <feature name='gso'/>
+ <feature name='gro'/>
+ <feature name='rxvlan'/>
+ <feature name='txvlan'/>
+ <feature name='rxhash'/>
+ <capability type='80203'/>
+ </capability>
+ </device>""", # noqa:E501
}
_fake_NodeDevXml_parents = {
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index c3b6f7898e..4ce3f03710 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -16,9 +16,9 @@
import collections
import functools
import os
+from unittest import mock
import fixtures
-import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
@@ -154,7 +154,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# their construction. Tests can use this to assert that disks were
# created of the expected type.
- def image_init(instance=None, disk_name=None, path=None):
+ def image_init(
+ instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
@@ -169,6 +171,7 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
+ setattr(disk, 'disk_info_mapping', disk_info_mapping)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
@@ -187,6 +190,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
+ # Set the SUPPORTS_LUKS member variable to mimic the Image base
+ # class.
+ image_init.SUPPORTS_LUKS = False
# Ditto for the 'is_shared_block_storage' and
# 'is_file_in_instance_path' functions
@@ -217,16 +223,16 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(
- self, mock_disk, disk_info, cache_mode, extra_specs, disk_unit=None,
+ self, mock_disk, cache_mode, extra_specs, disk_unit=None,
boot_order=None,
):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
- info.source_device = disk_info['type']
- info.target_bus = disk_info['bus']
- info.target_dev = disk_info['dev']
+ info.source_device = mock_disk.disk_info_mapping['type']
+ info.target_bus = mock_disk.disk_info_mapping['bus']
+ info.target_dev = mock_disk.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
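Editorial sketch (not part of the diff): the hunk above moves the per-disk metadata from a separate `disk_info` argument onto the fake disk itself as `disk_info_mapping`. A minimal, self-contained illustration of the new calling convention; a plain namespace and a dict stand in for the mocked disk and `LibvirtConfigGuestDisk`, and all names here are illustrative only.

    from types import SimpleNamespace

    # Stand-in for the mocked disk produced by image_init() above.
    disk = SimpleNamespace(
        path='/tmp/fake-instance/vda',
        disk_info_mapping={'type': 'disk', 'bus': 'virtio', 'dev': 'vda'},
    )

    def fake_libvirt_info(disk, cache_mode):
        # Every piece of per-disk metadata is read from the disk object
        # itself rather than from a separate disk_info argument.
        return {
            'source_device': disk.disk_info_mapping['type'],
            'target_bus': disk.disk_info_mapping['bus'],
            'target_dev': disk.disk_info_mapping['dev'],
            'driver_cache': cache_mode,
            'source_path': disk.path,
        }

    print(fake_libvirt_info(disk, 'none'))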
diff --git a/nova/tests/fixtures/neutron.py b/nova/tests/fixtures/neutron.py
index 681d52601d..a41007b83c 100644
--- a/nova/tests/fixtures/neutron.py
+++ b/nova/tests/fixtures/neutron.py
@@ -730,19 +730,22 @@ class NeutronFixture(fixtures.Fixture):
self._validate_port_binding(port_id, host_id)
del self._port_bindings[port_id][host_id]
- def _activate_port_binding(self, port_id, host_id):
+ def _activate_port_binding(self, port_id, host_id, modify_port=False):
# It makes sure that only one binding is active for a port
for host, binding in self._port_bindings[port_id].items():
if host == host_id:
# NOTE(gibi): neutron returns 409 if this binding is already
# active but nova does not depend on this behaviour yet.
binding['status'] = 'ACTIVE'
+ if modify_port:
+ # We need to ensure that the port's binding:host_id is valid
+ self._merge_in_active_binding(self._ports[port_id])
else:
binding['status'] = 'INACTIVE'
def activate_port_binding(self, port_id, host_id):
self._validate_port_binding(port_id, host_id)
- self._activate_port_binding(port_id, host_id)
+ self._activate_port_binding(port_id, host_id, modify_port=True)
def show_port_binding(self, port_id, host_id):
self._validate_port_binding(port_id, host_id)
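A minimal sketch (not part of the diff) of the invariant `_activate_port_binding` maintains: activating one host's binding deactivates every other binding for that port.

    def activate(bindings, host_id):
        # Mirror of _activate_port_binding: exactly one ACTIVE entry survives.
        for host, binding in bindings.items():
            binding['status'] = 'ACTIVE' if host == host_id else 'INACTIVE'

    bindings = {'host1': {'status': 'ACTIVE'}, 'host2': {'status': 'INACTIVE'}}
    activate(bindings, 'host2')
    assert [h for h, b in bindings.items()
            if b['status'] == 'ACTIVE'] == ['host2']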
diff --git a/nova/tests/fixtures/notifications.py b/nova/tests/fixtures/notifications.py
index c46b3a919d..817982d4ff 100644
--- a/nova/tests/fixtures/notifications.py
+++ b/nova/tests/fixtures/notifications.py
@@ -39,7 +39,7 @@ class _Sub(object):
def received(self, notification):
with self._cond:
self._notifications.append(notification)
- self._cond.notifyAll()
+ self._cond.notify_all()
def wait_n(self, n, event, timeout):
"""Wait until at least n notifications have been received, and return
@@ -170,8 +170,8 @@ class FakeVersionedNotifier(FakeNotifier):
'test case which is different from the currently running test '
'case %s. This notification is ignored. The sender test case '
'probably leaked a running eventlet that emitted '
- 'notifications after the test case finished. Now this eventlet'
- 'is terminated by raising this exception.' %
+ 'notifications after the test case finished. Now this '
+ 'eventlet is terminated by raising this exception.' %
(event_type, sender_test_case_id, self.test_case_id))
payload = self._serializer.serialize_entity(ctxt, payload)
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index ef873f6654..abfc3ecc6c 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -20,14 +20,17 @@ import collections
import contextlib
from contextlib import contextmanager
import functools
+from importlib.abc import MetaPathFinder
import logging as std_logging
import os
+import sys
+import time
+from unittest import mock
import warnings
import eventlet
import fixtures
import futurist
-import mock
from openstack import service_description
from oslo_concurrency import lockutils
from oslo_config import cfg
@@ -62,6 +65,7 @@ from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
+from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -451,6 +455,13 @@ class CellDatabases(fixtures.Fixture):
# yield to do the actual work. We can do schedulable things
# here and not exclude other threads from making progress.
# If an exception is raised, we capture that and save it.
+ # Note that it is possible that another thread has changed the
+ # global state (step #2) after we released the writer lock but
+ # before we acquired the reader lock. If this happens, we will
+ # detect the global state change and retry step #2 a limited number
+ # of times. If we happen to race repeatedly with another thread and
+ # exceed our retry limit, we will give up and raise a RuntimeError,
+ # which will fail the test.
# 4. If we changed state in #2, we need to change it back. So we grab
# a writer lock again and do that.
# 5. Finally, if an exception was raised in #3 while state was
@@ -469,29 +480,47 @@ class CellDatabases(fixtures.Fixture):
raised_exc = None
- with self._cell_lock.write_lock():
- if cell_mapping is not None:
- # This assumes the next local DB access is the same cell that
- # was targeted last time.
- self._last_ctxt_mgr = desired
+ def set_last_ctxt_mgr():
+ with self._cell_lock.write_lock():
+ if cell_mapping is not None:
+ # This assumes the next local DB access is the same cell
+ # that was targeted last time.
+ self._last_ctxt_mgr = desired
- with self._cell_lock.read_lock():
- if self._last_ctxt_mgr != desired:
- # NOTE(danms): This is unlikely to happen, but it's possible
- # another waiting writer changed the state between us letting
- # it go and re-acquiring as a reader. If lockutils supported
- # upgrading and downgrading locks, this wouldn't be a problem.
- # Regardless, assert that it is still as we left it here
- # so we don't hit the wrong cell. If this becomes a problem,
- # we just need to retry the write section above until we land
- # here with the cell we want.
- raise RuntimeError('Global DB state changed underneath us')
+ # Set last context manager to the desired cell's context manager.
+ set_last_ctxt_mgr()
+ # Retry setting the last context manager if we detect that a writer
+ # changed global DB state before we take the read lock.
+ for retry_time in range(0, 3):
try:
- with self._real_target_cell(context, cell_mapping) as ccontext:
- yield ccontext
- except Exception as exc:
- raised_exc = exc
+ with self._cell_lock.read_lock():
+ if self._last_ctxt_mgr != desired:
+ # NOTE(danms): This is unlikely to happen, but it's
+ # possible another waiting writer changed the state
+ # between us letting it go and re-acquiring as a
+ # reader. If lockutils supported upgrading and
+ # downgrading locks, this wouldn't be a problem.
+ # Regardless, assert that it is still as we left it
+ # here so we don't hit the wrong cell. If this becomes
+ # a problem, we just need to retry the write section
+ # above until we land here with the cell we want.
+ raise RuntimeError(
+ 'Global DB state changed underneath us')
+ try:
+ with self._real_target_cell(
+ context, cell_mapping
+ ) as ccontext:
+ yield ccontext
+ except Exception as exc:
+ raised_exc = exc
+ # Leave the retry loop after calling target_cell
+ break
+ except RuntimeError:
+ # Give other threads a chance to make progress, increasing the
+ # wait time between attempts.
+ time.sleep(retry_time)
+ set_last_ctxt_mgr()
with self._cell_lock.write_lock():
# Once we have returned from the context, we need
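The retry logic above reduces to a generic publish-then-verify pattern. A minimal sketch, assuming a fasteners/lockutils-style lock exposing `write_lock()` and `read_lock()` context managers; all names are illustrative.

    import time

    def run_with_stable_state(lock, set_state, state_is_ours, work,
                              attempts=3):
        for attempt in range(attempts):
            with lock.write_lock():
                set_state()          # publish the state we need (step 2)
            with lock.read_lock():
                if state_is_ours():  # a racing writer may have changed it
                    return work()    # do the real work under the read lock
            time.sleep(attempt)      # back off before re-publishing
        raise RuntimeError('Global DB state changed underneath us')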
@@ -537,11 +566,10 @@ class CellDatabases(fixtures.Fixture):
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
- return messaging.RPCClient(rpc.TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(rpc.TRANSPORT, target,
+ version_cap=version_cap,
+ serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
@@ -780,7 +808,7 @@ class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
- super(WarningsFixture, self).setUp()
+ super().setUp()
self._original_warning_filters = warnings.filters[:]
@@ -793,15 +821,19 @@ class WarningsFixture(fixtures.Fixture):
# forward on is_admin, the deprecation is definitely really premature.
warnings.filterwarnings(
'ignore',
- message='Policy enforcement is depending on the value of is_admin.'
- ' This key is deprecated. Please update your policy '
- 'file to use the standard policy values.')
+ message=(
+ 'Policy enforcement is depending on the value of is_admin. '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
# NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
warnings.filterwarnings(
'ignore',
message="Policy .* failed scope check",
- category=UserWarning)
+ category=UserWarning,
+ )
# NOTE(gibi): The UUIDFields emits a warning if the value is not a
# valid UUID. Let's escalate that to an exception in the test to
@@ -813,70 +845,36 @@ class WarningsFixture(fixtures.Fixture):
# how to handle (or isn't given a fallback callback).
warnings.filterwarnings(
'error',
- message="Cannot convert <oslo_db.sqlalchemy.enginefacade"
- "._Default object at ",
- category=UserWarning)
-
- warnings.filterwarnings(
- 'error', message='Evaluating non-mapped column expression',
- category=sqla_exc.SAWarning)
+ message=(
+ 'Cannot convert <oslo_db.sqlalchemy.enginefacade._Default '
+ 'object at '
+ ),
+ category=UserWarning,
+ )
# Enable deprecation warnings for nova itself to capture upcoming
# SQLAlchemy changes
warnings.filterwarnings(
'ignore',
- category=sqla_exc.SADeprecationWarning)
+ category=sqla_exc.SADeprecationWarning,
+ )
warnings.filterwarnings(
'error',
module='nova',
- category=sqla_exc.SADeprecationWarning)
-
- # ...but filter everything out until we get around to fixing them
- # TODO(stephenfin): Fix all of these
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'The current statement is being autocommitted .*',
- category=sqla_exc.SADeprecationWarning)
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'The Column.copy\(\) method is deprecated .*',
- category=sqla_exc.SADeprecationWarning)
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'The Connection.connect\(\) method is considered .*',
- category=sqla_exc.SADeprecationWarning)
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'Using strings to indicate column or relationship .*',
- category=sqla_exc.SADeprecationWarning)
+ category=sqla_exc.SADeprecationWarning,
+ )
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'Using strings to indicate relationship names .*',
- category=sqla_exc.SADeprecationWarning)
+ # Enable general SQLAlchemy warnings too, to ensure we're not doing
+ # silly stuff. It's possible that we'll need to filter things out here
+ # with future SQLAlchemy versions, but that's a good thing.
warnings.filterwarnings(
- 'ignore',
+ 'error',
module='nova',
- message=r'Invoking and_\(\) without arguments is deprecated, .*',
- category=sqla_exc.SADeprecationWarning)
-
- # TODO(stephenfin): Remove once we fix this in placement 5.0.2 or 6.0.0
- warnings.filterwarnings(
- 'ignore',
- message='Implicit coercion of SELECT and textual SELECT .*',
- category=sqla_exc.SADeprecationWarning)
+ category=sqla_exc.SAWarning,
+ )
self.addCleanup(self._reset_warning_filters)
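A self-contained sketch of the filter semantics relied on above: because `filterwarnings()` prepends new entries, the later, module-scoped 'error' filter wins over the blanket 'ignore' for warnings raised from matching modules. The 'mypkg' regex and the warning class are illustrative only.

    import warnings

    class DemoDeprecationWarning(Warning):
        pass

    warnings.filterwarnings('ignore', category=DemoDeprecationWarning)
    warnings.filterwarnings(
        'error', module='mypkg', category=DemoDeprecationWarning)

    # Raised from __main__, so the module-scoped 'error' filter does not
    # match and the blanket 'ignore' swallows it.
    warnings.warn('quiet here', DemoDeprecationWarning)
    print('no exception outside mypkg')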
@@ -1006,9 +1004,15 @@ class OSAPIFixture(fixtures.Fixture):
self.api = client.TestOpenStackClient(
'fake', base_url, project_id=self.project_id,
roles=['reader', 'member'])
+ self.alternative_api = client.TestOpenStackClient(
+ 'fake', base_url, project_id=self.project_id,
+ roles=['reader', 'member'])
self.admin_api = client.TestOpenStackClient(
'admin', base_url, project_id=self.project_id,
roles=['reader', 'member', 'admin'])
+ self.alternative_admin_api = client.TestOpenStackClient(
+ 'admin', base_url, project_id=self.project_id,
+ roles=['reader', 'member', 'admin'])
self.reader_api = client.TestOpenStackClient(
'reader', base_url, project_id=self.project_id,
roles=['reader'])
@@ -1104,9 +1108,9 @@ class PoisonFunctions(fixtures.Fixture):
# Don't poison the function if it's already mocked
import nova.virt.libvirt.host
if not isinstance(nova.virt.libvirt.host.Host._init_events, mock.Mock):
- self.useFixture(fixtures.MockPatch(
+ self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.Host._init_events',
- side_effect=evloop))
+ evloop))
class IndirectionAPIFixture(fixtures.Fixture):
@@ -1314,6 +1318,77 @@ class PrivsepFixture(fixtures.Fixture):
nova.privsep.sys_admin_pctxt, 'client_mode', False))
+class CGroupsFixture(fixtures.Fixture):
+ """Mocks checks made for available subsystems on the host's control group.
+
+ The fixture mocks all calls made on the host to verify the capabilities
+ provided by its kernel. Through this, one can simulate the underlying
+ system hosts work on top of and have tests react to expected outcomes from
+ such.
+
+ Usage:
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ >>> cgroups = self.useFixture(CGroupsFixture(version=2))
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ ... cgroups.version = 2
+
+ :attr version: Arranges mocks to simulate a host running the given
+ version of cgroups.
+ Available values are:
+ - 0: All checks related to cgroups will return False.
+ - 1: Checks related to cgroups v1 will return True.
+ - 2: Checks related to cgroups v2 will return True.
+ Defaults to 1.
+ """
+
+ def __init__(self, version=1):
+ self._cpuv1 = None
+ self._cpuv2 = None
+
+ self._version = version
+
+ @property
+ def version(self):
+ return self._version
+
+ @version.setter
+ def version(self, value):
+ self._version = value
+ self._update_mocks()
+
+ def setUp(self):
+ super().setUp()
+ self._cpuv1 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv1_cpu_controller')).mock
+ self._cpuv2 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv2_cpu_controller')).mock
+ self._update_mocks()
+
+ def _update_mocks(self):
+ if not self._cpuv1:
+ return
+
+ if not self._cpuv2:
+ return
+
+ if self.version == 0:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 1:
+ self._cpuv1.return_value = True
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 2:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = True
+ return
+
+ raise ValueError(f"Unknown cgroups version: '{self.version}'.")
+
+
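Inside a test case, the fixture might be driven like this. A sketch only: the test class and the driver code it exercises are hypothetical, and `nova_fixtures` is the usual import alias for `nova.tests.fixtures`.

    from nova import test
    from nova.tests import fixtures as nova_fixtures

    class LibvirtCgroupsTest(test.NoDBTestCase):
        def test_cpu_controller_detection(self):
            cgroups = self.useFixture(
                nova_fixtures.CGroupsFixture(version=2))
            # ... exercise code that calls the mocked
            # Host._has_cgroupsv2_cpu_controller() ...
            cgroups.version = 0  # later steps simulate a host with no
                                 # usable cpu controller at all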
class NoopQuotaDriverFixture(fixtures.Fixture):
"""A fixture to run tests using the NoopQuotaDriver.
@@ -1459,7 +1534,7 @@ class AvailabilityZoneFixture(fixtures.Fixture):
``get_availability_zones``.
``get_instance_availability_zone`` will return the availability_zone
- requested when creating a server otherwise the instance.availabilty_zone
+ requested when creating a server otherwise the instance.availability_zone
or default_availability_zone is returned.
"""
@@ -1611,7 +1686,11 @@ class GenericPoisonFixture(fixtures.Fixture):
current = __import__(components[0], {}, {})
for component in components[1:]:
current = getattr(current, component)
- if not isinstance(getattr(current, attribute), mock.Mock):
+
+ # NOTE(stephenfin): There are a couple of mock libraries in use
+ # (including mocked versions of mock from oslotest) so we can't
+ # use isinstance checks here
+ if 'mock' not in str(type(getattr(current, attribute))):
self.useFixture(fixtures.MonkeyPatch(
meth, poison_configure(meth, why)))
except ImportError:
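A quick illustration (not part of the diff) of why the string-based check works where `isinstance()` does not: each mock library renders its own module path in the type name, so the substring test covers them all.

    from unittest import mock

    patched = mock.MagicMock()
    # "<class 'unittest.mock.MagicMock'>"; the legacy rolling-backport
    # 'mock' package would render as "<class 'mock.mock.MagicMock'>",
    # so 'mock' appears in the type name either way.
    assert 'mock' in str(type(patched))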
@@ -1733,3 +1812,129 @@ class ReaderWriterLock(lockutils.ReaderWriterLock):
'threading.current_thread', eventlet.getcurrent)
with mpatch if eventlet_patched else contextlib.ExitStack():
super().__init__(*a, **kw)
+
+
+class SysFsPoisonFixture(fixtures.Fixture):
+
+ def inject_poison(self, module_name, function_name):
+ import importlib
+ mod = importlib.import_module(module_name)
+ orig_f = getattr(mod, function_name)
+ if (
+ isinstance(orig_f, mock.Mock) or
+ # FIXME(gibi): Is this a bug in unittest.mock? If I remove this
+ # then LibvirtReportSevTraitsTests fails as builtins.open is mocked
+ # there at import time via @test.patch_open. That injects a
+ # MagicMock instance to builtins.open which we check here against
+ # Mock (or even MagicMock) via isinstance and that check says it is
+ # not a mock. More interestingly I cannot reproduce the same
+ # issue with @test.patch_open and isinstance in a simple python
+ # interpreter. So to make progress I'm checking the class name
+ # here instead as that works.
+ orig_f.__class__.__name__ == "MagicMock"
+ ):
+ # the target is already mocked, probably via a decorator run at
+ # import time, so we don't need to inject our poison
+ return
+
+ full_name = module_name + "." + function_name
+
+ def toxic_wrapper(*args, **kwargs):
+ path = args[0]
+ if isinstance(path, bytes):
+ pattern = b'/sys'
+ elif isinstance(path, str):
+ pattern = '/sys'
+ else:
+ # we ignore the rest of the potential pathlike types for now
+ pattern = None
+
+ if pattern and path.startswith(pattern):
+ raise Exception(
+ 'This test invokes %s on %s. It is bad, you '
+ 'should mock it.'
+ % (full_name, path)
+ )
+ else:
+ return orig_f(*args, **kwargs)
+
+ self.useFixture(fixtures.MonkeyPatch(full_name, toxic_wrapper))
+
+ def setUp(self):
+ super().setUp()
+ self.inject_poison("os.path", "isdir")
+ self.inject_poison("builtins", "open")
+ self.inject_poison("glob", "iglob")
+ self.inject_poison("os", "listdir")
+ self.inject_poison("glob", "glob")
+ # TODO(gibi): Would be good to poison these too but that makes
+ # a bunch of tests fail
+ # self.inject_poison("os.path", "exists")
+ # self.inject_poison("os", "stat")
+
+
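A standalone sketch of the wrapping trick `SysFsPoisonFixture` applies: intercept a callable, reject `/sys` paths, and delegate everything else to the original.

    import builtins

    _orig_open = builtins.open

    def toxic_open(*args, **kwargs):
        path = args[0]
        pattern = b'/sys' if isinstance(path, bytes) else '/sys'
        if isinstance(path, (str, bytes)) and path.startswith(pattern):
            raise Exception('poisoned: open() on %r; mock it' % (path,))
        return _orig_open(*args, **kwargs)

    builtins.open = toxic_open
    try:
        open('/sys/devices/whatever')  # raises: the test forgot a mock
    except Exception as exc:
        print(exc)
    finally:
        builtins.open = _orig_open     # always undo the monkeypatch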
+class ImportModulePoisonFixture(fixtures.Fixture):
+ """Poison imports of modules unsuitable for the test environment.
+
+ Examples are guestfs and libvirt. Ordinarily, these would not be installed
+ in the test environment but if they _are_ present, it can result in
+ actual calls to libvirt, for example, which could cause tests to fail.
+
+ This fixture will inspect module imports and if they are in the disallowed
+ list, it will fail the test with a helpful message about mocking needed in
+ the test.
+ """
+
+ class ForbiddenModules(MetaPathFinder):
+ def __init__(self, test, modules):
+ super().__init__()
+ self.test = test
+ self.modules = modules
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.modules:
+ self.test.fail_message = (
+ f"This test imports the '{fullname}' module, which it "
+ f'should not in the test environment. Please add '
+ f'appropriate mocking to this test.'
+ )
+ raise ImportError(fullname)
+
+ def __init__(self, module_names):
+ self.module_names = module_names
+ self.fail_message = ''
+ if isinstance(module_names, str):
+ self.module_names = {module_names}
+ self.meta_path_finder = self.ForbiddenModules(self, self.module_names)
+
+ def setUp(self):
+ super().setUp()
+ self.addCleanup(self.cleanup)
+ sys.meta_path.insert(0, self.meta_path_finder)
+
+ def cleanup(self):
+ sys.meta_path.remove(self.meta_path_finder)
+ # We use a flag and check it during the cleanup phase to fail the test
+ # if needed. This is done because some module imports occur inside of a
+ # try-except block that ignores all exceptions, so raising an exception
+ # there (which is also what self.assert* and self.fail() do underneath)
+ # will not work to cause a failure in the test.
+ if self.fail_message:
+ raise ImportError(self.fail_message)
+
+
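The import-blocking mechanism in miniature (a sketch, not part of the diff): a finder placed first on `sys.meta_path` sees every import before the real finders do.

    import sys
    from importlib.abc import MetaPathFinder

    FORBIDDEN = {'guestfs', 'libvirt'}

    class Blocker(MetaPathFinder):
        def find_spec(self, fullname, path, target=None):
            if fullname in FORBIDDEN:
                raise ImportError(f'{fullname} must be mocked in tests')
            return None  # defer to the remaining finders

    finder = Blocker()
    sys.meta_path.insert(0, finder)
    try:
        # Fails even if the module is installed, as long as it is not
        # already cached in sys.modules.
        import libvirt  # noqa: F401
    except ImportError as exc:
        print(exc)
    finally:
        sys.meta_path.remove(finder)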
+class ComputeNodeIdFixture(fixtures.Fixture):
+ def setUp(self):
+ super().setUp()
+
+ node.LOCAL_NODE_UUID = None
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ lambda: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ lambda uuid: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
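Used inside a test case, the fixture keeps `nova.virt.node` away from the host's real identity file. A sketch; the `get_local_node_uuid()` accessor is assumed from `nova.virt.node` and is not shown in this diff.

    self.useFixture(nova_fixtures.ComputeNodeIdFixture())
    # With reads and writes stubbed out, the module falls back to
    # generating a fresh UUID for this test run instead of touching
    # the host's state file.
    node_uuid = node.get_local_node_uuid()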
diff --git a/nova/tests/fixtures/os_brick.py b/nova/tests/fixtures/os_brick.py
index e636e8b8f5..2062e8ed14 100644
--- a/nova/tests/fixtures/os_brick.py
+++ b/nova/tests/fixtures/os_brick.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from os_brick.initiator import connector as brick_connector
diff --git a/nova/tests/fixtures/policy.py b/nova/tests/fixtures/policy.py
index daecc2868b..b0b844fe37 100644
--- a/nova/tests/fixtures/policy.py
+++ b/nova/tests/fixtures/policy.py
@@ -65,7 +65,7 @@ class RealPolicyFixture(fixtures.Fixture):
def add_missing_default_rules(self, rules):
"""Adds default rules and their values to the given rules dict.
- The given rulen dict may have an incomplete set of policy rules.
+ The given rules dict may have an incomplete set of policy rules.
This method will add the default policy rules and their values to
the dict. It will not override the existing rules.
"""
@@ -141,7 +141,7 @@ class OverridePolicyFixture(RealPolicyFixture):
not used. One example is when policy rules are deprecated. In that case
tests can use this fixture and verify if deprecated rules are overridden
then does nova code enforce the overridden rules not only defaults.
- As per oslo.policy deprecattion feature, if deprecated rule is overridden
+ As per oslo.policy deprecation feature, if deprecated rule is overridden
in policy file then, overridden check is used to verify the policy.
Example of usage:
diff --git a/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
index 6b56f72139..d35850baed 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
@@ -217,6 +217,124 @@
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_disabled_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_disabled_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "False"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_luks_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_luks_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "luks"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_plain_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_plain_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "plain"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
}
]
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
index 035cc83695..dc08ba7053 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
@@ -132,6 +132,82 @@
}
],
"name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_disabled_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_disabled_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_luks_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_luks_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_plain_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_plain_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
}
]
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl
new file mode 100644
index 0000000000..03e60c0133
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl
new file mode 100644
index 0000000000..30d3fa969d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl
new file mode 100644
index 0000000000..f6a6d47b56
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl
new file mode 100644
index 0000000000..9bcd25139a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "availability_zone": "%(availability_zone)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl
index 9bcd25139a..d78efa84e1 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl
@@ -1,5 +1,3 @@
{
- "%(action)s": {
- "availability_zone": "%(availability_zone)s"
- }
+ "unshelve": null
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-null.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-shelve.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-null.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-shelve.json.tpl
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl
new file mode 100644
index 0000000000..eecc4271cb
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl
@@ -0,0 +1,6 @@
+{
+ "%(action)s": {
+ "availability_zone": "%(availability_zone)s",
+ "host": "%(host)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl
new file mode 100644
index 0000000000..9bcd25139a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "availability_zone": "%(availability_zone)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl
new file mode 100644
index 0000000000..f9d2a2b17a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl
@@ -0,0 +1,6 @@
+{
+ "%(action)s": {
+ "availability_zone": null,
+ "host": "%(host)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl
new file mode 100644
index 0000000000..3363b524ee
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "host": "%(host)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl
new file mode 100644
index 0000000000..3815586c5c
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "availability_zone": null
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl
new file mode 100644
index 0000000000..d78efa84e1
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl
@@ -0,0 +1,3 @@
+{
+ "unshelve": null
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..486433733d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..3becc83fba
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "hostname": "%(hostname)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
new file mode 100644
index 0000000000..f83c78fdc9
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "%(user_data)s",
+ "networks": "auto",
+ "hostname": "custom-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
new file mode 100644
index 0000000000..ae2088619a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
new file mode 100644
index 0000000000..bc4be64a8e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "new-server-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
new file mode 100644
index 0000000000..2adc16df5e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..f49d21e7a2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/detail?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..9cdb3aa644
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py b/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py
index 59ef2496b5..569df728e3 100644
--- a/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py
+++ b/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.functional.api_sample_tests import api_sample_base
diff --git a/nova/tests/functional/api_sample_tests/test_compare_result.py b/nova/tests/functional/api_sample_tests/test_compare_result.py
index 652b9c9035..1b68439184 100644
--- a/nova/tests/functional/api_sample_tests/test_compare_result.py
+++ b/nova/tests/functional/api_sample_tests/test_compare_result.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import testtools
from nova import test
diff --git a/nova/tests/functional/api_sample_tests/test_create_backup.py b/nova/tests/functional/api_sample_tests/test_create_backup.py
index 2e5758c36e..cf454e948b 100644
--- a/nova/tests/functional/api_sample_tests/test_create_backup.py
+++ b/nova/tests/functional/api_sample_tests/test_create_backup.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests import fixtures
from nova.tests.functional.api_sample_tests import test_servers
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index 1b12e2caf4..15efb39d44 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.tests.functional.api_sample_tests import test_servers
@@ -79,7 +79,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -95,7 +96,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -116,7 +118,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -131,7 +134,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -158,7 +162,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -178,7 +183,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -204,8 +210,47 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
pass
+
+
+class EvacuateJsonTestV295(EvacuateJsonTestV268):
+ microversion = '2.95'
+ scenarios = [('v2_95', {'api_major_version': 'v2.1'})]
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
diff --git a/nova/tests/functional/api_sample_tests/test_hypervisors.py b/nova/tests/functional/api_sample_tests/test_hypervisors.py
index f402f9ebde..f5832ab4ac 100644
--- a/nova/tests/functional/api_sample_tests/test_hypervisors.py
+++ b/nova/tests/functional/api_sample_tests/test_hypervisors.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.functional.api_sample_tests import api_sample_base
diff --git a/nova/tests/functional/api_sample_tests/test_images.py b/nova/tests/functional/api_sample_tests/test_images.py
index 924bc7768f..c84e566409 100644
--- a/nova/tests/functional/api_sample_tests/test_images.py
+++ b/nova/tests/functional/api_sample_tests/test_images.py
@@ -19,10 +19,29 @@ from nova.tests.functional.api_sample_tests import api_sample_base
class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
sample_dir = 'images'
+ def generalize_subs(self, subs, vanilla_regexes):
+ """Give the test a chance to modify subs after the server response
+ was verified, and before the on-disk doc/api_samples file is checked.
+ """
+ # When comparing the template to the sample we just care that the image
+ # IDs are UUIDs.
+ subs['eph_encryption_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_disabled_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_luks_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_plain_id'] = vanilla_regexes['uuid']
+ return subs
+
def test_images_list(self):
# Get api sample of images get list request.
response = self._do_get('images')
- self._verify_response('images-list-get-resp', {}, response, 200)
+ subs = {
+ 'eph_encryption_id': self.glance.eph_encryption['id'],
+ 'eph_encryption_disabled_id':
+ self.glance.eph_encryption_disabled['id'],
+ 'eph_encryption_luks_id': self.glance.eph_encryption_luks['id'],
+ 'eph_encryption_plain_id': self.glance.eph_encryption_plain['id'],
+ }
+ self._verify_response('images-list-get-resp', subs, response, 200)
def test_image_get(self):
# Get api sample of one single image details request.
@@ -34,7 +53,14 @@ class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
def test_images_details(self):
# Get api sample of all images details request.
response = self._do_get('images/detail')
- self._verify_response('images-details-get-resp', {}, response, 200)
+ subs = {
+ 'eph_encryption_id': self.glance.eph_encryption['id'],
+ 'eph_encryption_disabled_id':
+ self.glance.eph_encryption_disabled['id'],
+ 'eph_encryption_luks_id': self.glance.eph_encryption_luks['id'],
+ 'eph_encryption_plain_id': self.glance.eph_encryption_plain['id'],
+ }
+ self._verify_response('images-details-get-resp', subs, response, 200)
def test_image_metadata_get(self):
# Get api sample of an image metadata request.
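Roughly speaking, the sample verifier substitutes the %(name)s placeholders
into a template and regex-matches the result against the response, which is
why generalize_subs can widen exact image IDs to a bare UUID pattern for the
on-disk comparison. A standalone illustration of that idea (template and
values invented here):

    import re

    UUID_RE = '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
    template = '{"id": "%(eph_encryption_id)s"}'

    # Against the live response the exact image ID is substituted in...
    subs = {'eph_encryption_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}
    assert re.match(template % subs,
                    '{"id": "155d900f-4e14-4e4c-a73d-069cbf4541e6"}')

    # ...while the stored sample only needs to contain *some* UUID.
    subs['eph_encryption_id'] = UUID_RE
    assert re.match(template % subs,
                    '{"id": "deadbeef-dead-beef-dead-beefdeadbeef"}')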
diff --git a/nova/tests/functional/api_sample_tests/test_keypairs.py b/nova/tests/functional/api_sample_tests/test_keypairs.py
index eab88f61e1..a121b98449 100644
--- a/nova/tests/functional/api_sample_tests/test_keypairs.py
+++ b/nova/tests/functional/api_sample_tests/test_keypairs.py
@@ -319,3 +319,66 @@ class KeyPairsV235SampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
% keypairs_user2[1])
subs = {'keypair_name': keypairs_user2[2]}
self._verify_response('keypairs-list-user2-resp', subs, response, 200)
+
+
+class KeyPairsV292SampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
+ ADMIN_API = True
+ sample_dir = 'os-keypairs'
+ microversion = '2.92'
+ expected_post_status_code = 201
+ scenarios = [('v2_92', {'api_major_version': 'v2.1'})]
+
+ def setUp(self):
+ super(KeyPairsV292SampleJsonTest, self).setUp()
+ self.api.microversion = self.microversion
+
+ # NOTE(sbauza): This method is stupidly needed for _verify_response().
+ # See the TODO(sdague) above.
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['keypair_name'] = '[0-9a-zA-Z-_.@ ]+'
+ return subs
+
+ def test_keypairs_post_no_longer_supported(self):
+ subs = {
+ 'keypair_name': 'foo',
+ 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
+ 'user_id': 'fake'
+ }
+ response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
+ self.assertEqual(400, response.status_code)
+
+ def test_keypairs_import_key_invalid_name(self):
+ public_key = fake_crypto.get_ssh_public_key()
+ subs = {
+ 'keypair_name': '!nvalid=name|',
+ 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
+ 'user_id': 'fake',
+ 'public_key': public_key,
+ }
+ response = self._do_post('os-keypairs', 'keypairs-import-post-req',
+ subs)
+ self.assertEqual(400, response.status_code)
+
+ def _test_keypairs_import_key_post(self, name=None):
+ if not name:
+ name = 'keypair-' + uuids.fake
+ public_key = fake_crypto.get_ssh_public_key()
+ params = {
+ 'keypair_name': name,
+ 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
+ 'user_id': 'fake',
+ 'public_key': public_key,
+ }
+ response = self._do_post('os-keypairs', 'keypairs-import-post-req',
+ params)
+ # NOTE(sbauza): We do some crazy regexp change in _verify_response() so
+ # we only need to pass the keypair name.
+ subs = {'keypair_name': name}
+ self._verify_response('keypairs-import-post-resp', subs, response,
+ self.expected_post_status_code)
+
+ def test_keypairs_import_key_post(self):
+ self._test_keypairs_import_key_post()
+
+ def test_keypairs_import_key_special_characters(self):
+ self._test_keypairs_import_key_post(name='my-key@ my.host')
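The keypair-name pattern used by generalize_subs above is what makes the two
negative tests tick; a quick standalone check of which names the character
class accepts:

    import re

    KEYPAIR_NAME_RE = r'[0-9a-zA-Z-_.@ ]+'

    # Letters, digits, '-', '_', '.', '@' and spaces are accepted...
    assert re.fullmatch(KEYPAIR_NAME_RE, 'my-key@ my.host')
    # ...while names with characters like '!' or '|' fall outside it; the
    # API enforces a similar restriction, hence the 400 expected above.
    assert re.fullmatch(KEYPAIR_NAME_RE, '!nvalid=name|') is None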
diff --git a/nova/tests/functional/api_sample_tests/test_migrate_server.py b/nova/tests/functional/api_sample_tests/test_migrate_server.py
index 59321c845a..5fe7070410 100644
--- a/nova/tests/functional/api_sample_tests/test_migrate_server.py
+++ b/nova/tests/functional/api_sample_tests/test_migrate_server.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import versionutils
from nova import exception
diff --git a/nova/tests/functional/api_sample_tests/test_networks.py b/nova/tests/functional/api_sample_tests/test_networks.py
index 0a75d156cb..dd5d945e2a 100644
--- a/nova/tests/functional/api_sample_tests/test_networks.py
+++ b/nova/tests/functional/api_sample_tests/test_networks.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
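An aside on the side_effect hook used above: assigning an exception (or a
callable that raises one) makes every call to the mock raise it, which is how
the test drives the 409 path. A minimal sketch, independent of Nova:

    from unittest import mock

    class ConsoleAPI:
        def get_vnc_console(self, instance):
            return 'http://example/console'

    with mock.patch.object(ConsoleAPI, 'get_vnc_console') as m:
        # side_effect may be an exception class/instance or a callable;
        # every call to the mock then raises (or invokes) it.
        m.side_effect = RuntimeError('instance is in an invalid state')
        try:
            ConsoleAPI().get_vnc_console('inst-1')
        except RuntimeError:
            pass  # the API layer maps this to an HTTP 409 for the caller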
diff --git a/nova/tests/functional/api_sample_tests/test_server_migrations.py b/nova/tests/functional/api_sample_tests/test_server_migrations.py
index 15fb72945c..8ee3b6a36a 100644
--- a/nova/tests/functional/api_sample_tests/test_server_migrations.py
+++ b/nova/tests/functional/api_sample_tests/test_server_migrations.py
@@ -14,9 +14,9 @@
# under the License.
import datetime
+from unittest import mock
import futurist
-import mock
from nova.conductor import manager as conductor_manager
from nova import context
diff --git a/nova/tests/functional/api_sample_tests/test_server_password.py b/nova/tests/functional/api_sample_tests/test_server_password.py
index 11921291f1..8c4800103b 100644
--- a/nova/tests/functional/api_sample_tests/test_server_password.py
+++ b/nova/tests/functional/api_sample_tests/test_server_password.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.functional.api_sample_tests import test_servers
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index aa07b88247..7679c9b734 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -618,6 +618,13 @@ class ServersSampleJson290Test(ServersSampleJsonTest):
ADMIN_API = False
+class ServersSampleJson294Test(ServersSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ use_common_server_post = False
+ ADMIN_API = False
+
+
class ServersUpdateSampleJsonTest(ServersSampleBase):
# Many of the 'os_compute_api:servers:*' policies are admin-only, and we
@@ -702,6 +709,44 @@ class ServersUpdateSampleJson290Test(ServersUpdateSampleJsonTest):
self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+class ServersUpdateSampleJson294Test(ServersUpdateSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ ADMIN_API = False
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ subs['hostname'] = 'updated-hostname.example.com'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ params = {
+ 'uuid': self.glance.auto_disk_config_enabled_image['id'],
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ 'hostname': 'updated-hostname.example.com',
+ }
+
+ resp = self._do_post(
+ 'servers/%s/action' % uuid,
+ 'server-action-rebuild',
+ params,
+ )
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
diff --git a/nova/tests/functional/api_sample_tests/test_shelve.py b/nova/tests/functional/api_sample_tests/test_shelve.py
index 37d24b6cea..0dfef71055 100644
--- a/nova/tests/functional/api_sample_tests/test_shelve.py
+++ b/nova/tests/functional/api_sample_tests/test_shelve.py
@@ -15,10 +15,25 @@
import nova.conf
+from nova import objects
from nova.tests.functional.api_sample_tests import test_servers
+from oslo_utils.fixture import uuidsentinel
+from unittest import mock
CONF = nova.conf.CONF
+fake_aggregate = {
+ 'deleted': 0,
+ 'deleted_at': None,
+ 'created_at': None,
+ 'updated_at': None,
+ 'id': 123,
+ 'uuid': uuidsentinel.fake_aggregate,
+ 'name': 'us-west',
+ 'hosts': ['host01'],
+ 'metadetails': {'availability_zone': 'us-west'},
+}
+
class ShelveJsonTest(test_servers.ServersSampleBase):
# The 'os_compute_api:os-shelve:shelve_offload' policy is admin-only
@@ -30,9 +45,11 @@ class ShelveJsonTest(test_servers.ServersSampleBase):
# Don't offload instance, so we can test the offload call.
CONF.set_override('shelved_offload_time', -1)
- def _test_server_action(self, uuid, template, action):
+ def _test_server_action(self, uuid, template, action, subs=None):
+ subs = subs or {}
+ subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
- template, {'action': action})
+ template, subs)
self.assertEqual(202, response.status_code)
self.assertEqual("", response.text)
@@ -51,26 +68,288 @@ class ShelveJsonTest(test_servers.ServersSampleBase):
self._test_server_action(uuid, 'os-unshelve', 'unshelve')
-class UnshelveJson277Test(test_servers.ServersSampleBase):
+class UnshelveJson277Test(ShelveJsonTest):
+ ADMIN_API = False
sample_dir = "os-shelve"
microversion = '2.77'
scenarios = [('v2_77', {'api_major_version': 'v2.1'})]
+ def setUp(self):
+ super(UnshelveJson277Test, self).setUp()
+        # Almost all of the following tests require the instance to be
+        # shelve offloaded, so shelve offload the instance and skip
+        # test_shelve_offload below.
+ CONF.set_override('shelved_offload_time', 0)
+
+ def test_shelve_offload(self):
+ # Skip this test as the instance is already shelve offloaded.
+ pass
+
+ def test_unshelve_with_az(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-az',
+ 'unshelve',
+ subs={"availability_zone": "us-west"}
+ )
+
+
+class UnshelveJson291Test(UnshelveJson277Test):
+ ADMIN_API = True
+ sample_dir = "os-shelve"
+ microversion = '2.91'
+ scenarios = [('v2_91', {'api_major_version': 'v2.1'})]
+
+ def _test_server_action_invalid(
+ self, uuid, template, action, subs=None, msg=None):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ template, subs)
+ self.assertEqual(400, response.status_code)
+ self.assertIn(msg, response.text)
+
+ def test_unshelve_with_non_valid_host(self):
+ """Ensure an exception rise if host is invalid and
+ a http 400 error
+ """
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action_invalid(
+ uuid, 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'},
+ msg='Compute host host01 could not be found.')
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_valid_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'}
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host and az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01', 'availability_zone': 'us-west'},
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_unpin_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host and az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-host-and-unpin-az',
+ 'unshelve',
+ subs={'host': 'host01'},
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_unpin_az(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unpin an az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-unpin-az',
+ 'unshelve',
+ subs={'host': 'host01'},
+ )
+
+
+class UnshelveJson291NonAdminTest(UnshelveJson291Test):
+ # Use non admin api credentials.
+ ADMIN_API = False
+ sample_dir = "os-shelve"
+ microversion = '2.91'
+ scenarios = [('v2_91', {'api_major_version': 'v2.1'})]
+
+ def _test_server_action_invalid(self, uuid, template, action, subs=None):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ template, subs)
+ self.assertEqual(403, response.status_code)
+ self.assertIn(
+ "Policy doesn\'t allow os_compute_api:os-shelve:unshelve_to_host" +
+ " to be performed.", response.text)
+
def _test_server_action(self, uuid, template, action, subs=None):
subs = subs or {}
subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
template, subs)
self.assertEqual(202, response.status_code)
- self.assertEqual("", response.text)
+ self.assertEqual('', response.text)
+
+ def test_unshelve_with_non_valid_host(self):
+ """Ensure an exception rise if user is not admin.
+ a http 403 error
+ """
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'}
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_unpin_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
- def test_unshelve_with_az(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-unshelve', 'unshelve',
- subs={"availability_zone": "us-west"})
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host-and-unpin-az',
+ 'unshelve',
+ subs={'host': 'host01'},
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_valid_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'}
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host and az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
- def test_unshelve_no_az(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-unshelve-null', 'unshelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01', 'availability_zone': 'us-west'},
+ )
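One subtlety in the stacked @mock.patch decorators used throughout these
unshelve tests: the mocks are handed to the test function bottom-up, so the
decorator closest to the function supplies the first argument. A minimal
standalone check of that documented ordering:

    from unittest import mock
    import os.path

    @mock.patch('os.path.exists')    # outermost decorator -> second argument
    @mock.patch('os.path.basename')  # innermost decorator -> first argument
    def demo(mock_basename, mock_exists):
        mock_basename.return_value = 'host01'
        mock_exists.return_value = True
        assert os.path.basename('/ignored') == 'host01'
        assert os.path.exists('/ignored')

    demo()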
diff --git a/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py b/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py
index 5ee3ba7163..36b224510d 100644
--- a/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py
+++ b/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
from urllib import parse
-import mock
from oslo_utils import timeutils
from nova.tests.functional.api_sample_tests import test_servers
diff --git a/nova/tests/functional/compute/test_init_host.py b/nova/tests/functional/compute/test_init_host.py
index f506f6ed59..f5c821e116 100644
--- a/nova/tests/functional/compute/test_init_host.py
+++ b/nova/tests/functional/compute/test_init_host.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import time
+from unittest import mock
from nova import context as nova_context
from nova import objects
diff --git a/nova/tests/functional/compute/test_live_migration.py b/nova/tests/functional/compute/test_live_migration.py
index b4d68cd1d5..fb7315a23c 100644
--- a/nova/tests/functional/compute/test_live_migration.py
+++ b/nova/tests/functional/compute/test_live_migration.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/functional/compute/test_migration_list.py b/nova/tests/functional/compute/test_migration_list.py
index 49ea236bc4..bfcb018320 100644
--- a/nova/tests/functional/compute/test_migration_list.py
+++ b/nova/tests/functional/compute/test_migration_list.py
@@ -64,7 +64,7 @@ class TestMigrationListObjects(test.TestCase):
self.context, filters, limit, marker,
sort_keys, sort_dirs)
found_uuids = [x.uuid for x in migs]
- had_uuids = sorted([x['uuid'] for x in self.migrations])
+ had_uuids = sorted([x.uuid for x in self.migrations])
self.assertEqual(had_uuids, found_uuids)
def test_get_instance_objects_sorted_paged(self):
@@ -80,7 +80,7 @@ class TestMigrationListObjects(test.TestCase):
['created_at'], ['asc'])
self.assertEqual(len(self.migrations), len(migp1))
migp2 = migration_list.get_migration_objects_sorted(
- self.context, {}, None, migp1[-1]['uuid'],
+ self.context, {}, None, migp1[-1].uuid,
['created_at'], ['asc'])
self.assertEqual(0, len(migp2))
@@ -93,7 +93,7 @@ class TestMigrationListObjects(test.TestCase):
def test_get_sorted_with_limit(self):
migs = migration_list.get_migration_objects_sorted(
self.context, {}, 2, None, ['uuid'], ['asc'])
- uuids = [mig['uuid'] for mig in migs]
+ uuids = [mig.uuid for mig in migs]
had_uuids = [mig.uuid for mig in self.migrations]
self.assertEqual(sorted(had_uuids)[:2], uuids)
self.assertEqual(2, len(uuids))
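The hunks above swap dict-style lookups (x['uuid']) for plain attribute
access on the migration objects; the equivalent sorting idiom in isolation,
using a toy Migration type rather than Nova's:

    from dataclasses import dataclass
    from operator import attrgetter

    @dataclass
    class Migration:
        uuid: str

    migrations = [Migration('b'), Migration('a')]
    # Attribute access replaces the older dict-style item lookup.
    assert sorted(m.uuid for m in migrations) == ['a', 'b']
    assert sorted(migrations, key=attrgetter('uuid'))[0].uuid == 'a'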
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index 81b7dfb68c..139fb5e6ac 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -12,9 +12,9 @@
import copy
import os
+from unittest import mock
import fixtures
-import mock
import os_resource_classes as orc
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
@@ -29,7 +29,6 @@ from nova import conf
from nova import context
from nova import objects
from nova import test
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.virt import driver as virt_driver
@@ -249,6 +248,7 @@ class IronicResourceTrackerTest(test.TestCase):
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
+ 'uuid': str(getattr(uuids, nodename)),
}
self.rt.update_available_resource(self.ctx, nodename)
@@ -694,15 +694,6 @@ class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase):
feature a vm cannot be spawned using a custom trait and then start a
compute service that provides that trait.
"""
-
- self.useFixture(nova_fixtures.NeutronFixture(self))
- self.useFixture(nova_fixtures.GlanceFixture(self))
-
- # Start nova services.
- self.api = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1')).admin_api
- self.api.microversion = 'latest'
- self.start_service('conductor')
# start nova-compute that will not have the additional trait.
self._start_compute("fake-host-1")
diff --git a/nova/tests/functional/db/test_aggregate.py b/nova/tests/functional/db/test_aggregate.py
index 35d9024576..be3cd67e38 100644
--- a/nova/tests/functional/db/test_aggregate.py
+++ b/nova/tests/functional/db/test_aggregate.py
@@ -11,8 +11,8 @@
# under the License.
from copy import deepcopy
+from unittest import mock
-import mock
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/functional/db/test_compute_api.py b/nova/tests/functional/db/test_compute_api.py
index 49fa10281a..0cf3e4f5e9 100644
--- a/nova/tests/functional/db/test_compute_api.py
+++ b/nova/tests/functional/db/test_compute_api.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import api as compute_api
diff --git a/nova/tests/functional/db/test_compute_node.py b/nova/tests/functional/db/test_compute_node.py
index 0c605121e4..1bca4eecf6 100644
--- a/nova/tests/functional/db/test_compute_node.py
+++ b/nova/tests/functional/db/test_compute_node.py
@@ -267,7 +267,7 @@ class ComputeNodeTestCase(test.TestCase):
self.assertEqual(res, (1, 1))
# the ratio is refreshed to CONF.initial_xxx_allocation_ratio
- # beacause CONF.xxx_allocation_ratio is None
+ # because CONF.xxx_allocation_ratio is None
cns = db.compute_node_get_all(self.context)
# the ratio is refreshed to CONF.xxx_allocation_ratio
for cn in cns:
diff --git a/nova/tests/functional/db/test_host_mapping.py b/nova/tests/functional/db/test_host_mapping.py
index e4b5a365a7..3d0c5575ca 100644
--- a/nova/tests/functional/db/test_host_mapping.py
+++ b/nova/tests/functional/db/test_host_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
diff --git a/nova/tests/functional/db/test_instance_group.py b/nova/tests/functional/db/test_instance_group.py
index 50314f17ac..6a801f2a55 100644
--- a/nova/tests/functional/db/test_instance_group.py
+++ b/nova/tests/functional/db/test_instance_group.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import fixture as ovo_fixture
diff --git a/nova/tests/functional/db/test_instance_mapping.py b/nova/tests/functional/db/test_instance_mapping.py
index 1b740df629..ef78e7910a 100644
--- a/nova/tests/functional/db/test_instance_mapping.py
+++ b/nova/tests/functional/db/test_instance_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
diff --git a/nova/tests/functional/db/test_quota.py b/nova/tests/functional/db/test_quota.py
index 8c2b637269..cdadebd408 100644
--- a/nova/tests/functional/db/test_quota.py
+++ b/nova/tests/functional/db/test_quota.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils import uuidutils
from nova import context
diff --git a/nova/tests/functional/db/test_virtual_interface.py b/nova/tests/functional/db/test_virtual_interface.py
index 0d64f99cc8..2accb80c01 100644
--- a/nova/tests/functional/db/test_virtual_interface.py
+++ b/nova/tests/functional/db/test_virtual_interface.py
@@ -11,7 +11,8 @@
# under the License.
import datetime
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import timeutils
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 70918bc5f5..cdf71da0d4 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -247,6 +247,27 @@ class InstanceHelperMixin:
self.assertIn(error_in_tb, event['traceback'])
return event
+ def _assert_build_request_success(self, server_request):
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server['id']
+
+ def _assert_build_request_schedule_failure(self, server_request):
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ERROR')
+
+ def _assert_bad_build_request_error(self, server_request):
+ ex = self.assertRaises(
+ api_client.OpenStackApiException, self.api.post_server,
+ {'server': server_request})
+ self.assertEqual(400, ex.response.status_code)
+
+ def _assert_build_request_error(self, server_request):
+ ex = self.assertRaises(
+ api_client.OpenStackApiException, self.api.post_server,
+ {'server': server_request})
+ self.assertEqual(500, ex.response.status_code)
+
def _wait_for_migration_status(self, server, expected_statuses):
"""Waits for a migration record with the given statuses to be found
for the given server, else the test fails. The migration record, if
@@ -540,8 +561,8 @@ class InstanceHelperMixin:
self.api.post_server_action(
server['id'],
{'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
- self._wait_for_state_change(server, server_expected_state)
self._wait_for_migration_status(server, [migration_expected_state])
+ return self._wait_for_state_change(server, server_expected_state)
_live_migrate_server = _live_migrate
@@ -577,7 +598,7 @@ class InstanceHelperMixin:
def _evacuate_server(
self, server, extra_post_args=None, expected_host=None,
- expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_state='SHUTOFF', expected_task_state=NOT_SPECIFIED,
expected_migration_status='done'):
"""Evacuate a server."""
api = getattr(self, 'admin_api', self.api)
@@ -606,9 +627,18 @@ class InstanceHelperMixin:
self.api.post_server_action(server['id'], {'os-start': None})
return self._wait_for_state_change(server, 'ACTIVE')
- def _stop_server(self, server):
+ def _stop_server(self, server, wait_for_stop=True):
self.api.post_server_action(server['id'], {'os-stop': None})
- return self._wait_for_state_change(server, 'SHUTOFF')
+ if wait_for_stop:
+ return self._wait_for_state_change(server, 'SHUTOFF')
+ return server
+
+ def _snapshot_server(self, server, snapshot_name):
+ """Create server snapshot."""
+ self.api.post_server_action(
+ server['id'],
+ {'createImage': {'name': snapshot_name}}
+ )
class PlacementHelperMixin:
@@ -629,12 +659,16 @@ class PlacementHelperMixin:
'/resource_providers', version='1.14'
).body['resource_providers']
- def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
+ def _get_all_rps_in_a_tree(self, in_tree_rp_uuid):
rps = self.placement.get(
'/resource_providers?in_tree=%s' % in_tree_rp_uuid,
version='1.20',
).body['resource_providers']
- return [rp['uuid'] for rp in rps]
+ return rps
+
+ def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
+ return [
+ rp['uuid'] for rp in self._get_all_rps_in_a_tree(in_tree_rp_uuid)]
def _post_resource_provider(self, rp_name):
return self.placement.post(
@@ -842,6 +876,20 @@ class PlacementHelperMixin:
'Test expected a single migration but found %i' % len(migrations))
return migrations[0].uuid
+ def _reserve_placement_resource(self, rp_name, rc_name, reserved):
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ inv = self.placement.get(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26'
+ ).body
+ inv["reserved"] = reserved
+ result = self.placement.put(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26', body=inv
+ ).body
+ self.assertEqual(reserved, result["reserved"])
+ return result
+
class PlacementInstanceHelperMixin(InstanceHelperMixin, PlacementHelperMixin):
"""A placement-aware variant of InstanceHelperMixin."""
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index f53baa1e24..1ee46a3217 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -15,9 +15,10 @@
import copy
import io
+from unittest import mock
import fixtures
-import mock
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova.tests import fixtures as nova_fixtures
@@ -42,7 +43,8 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
self.useFixture(fixtures.MockPatch(
@@ -51,12 +53,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128, 'used': 44, 'free': 84}))
- self.useFixture(fixtures.MockPatch(
+ self.mock_is_valid_hostname = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True))
- self.useFixture(fixtures.MockPatch(
+ return_value=True)).mock
+ self.mock_file_open = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=lambda *a, **k: io.BytesIO(b'')))
+ side_effect=lambda *a, **k: io.BytesIO(b''))).mock
self.useFixture(fixtures.MockPatch(
'nova.privsep.utils.supports_direct_io',
return_value=True))
@@ -114,7 +116,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
def start_compute(
self, hostname='compute1', host_info=None, pci_info=None,
mdev_info=None, vdpa_info=None, libvirt_version=None,
- qemu_version=None,
+ qemu_version=None, cell_name=None, connection=None
):
"""Start a compute service.
@@ -124,34 +126,65 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
:param host_info: A fakelibvirt.HostInfo object for the host. Defaults
to a HostInfo with 2 NUMA nodes, 2 cores per node, 2 threads per
core, and 16GB of RAM.
+        :param connection: A fake libvirt connection. You should not provide
+            it directly; it is used by restart_compute_service to implement
+            a restart without losing the hypervisor state.
:returns: The hostname of the created service, which can be used to
lookup the created service and UUID of the associated resource
provider.
"""
+ if connection and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either an existing connection instance can be provided or a "
+ "list of parameters for a new connection"
+ )
def _start_compute(hostname, host_info):
- fake_connection = self._get_connection(
- host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
- qemu_version, hostname,
- )
+ if connection:
+ fake_connection = connection
+ else:
+ fake_connection = self._get_connection(
+ host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
+ qemu_version, hostname,
+ )
+
+ # If the compute is configured with PCI devices then we need to
+ # make sure that the stubs around sysfs has the MAC address
+ # information for the PCI PF devices
+ if pci_info:
+ self.libvirt.update_sriov_mac_address_mapping(
+ pci_info.get_pci_address_mac_mapping())
# This is fun. Firstly we need to do a global'ish mock so we can
# actually start the service.
- with mock.patch('nova.virt.libvirt.host.Host.get_connection',
- return_value=fake_connection):
- compute = self.start_service('compute', host=hostname)
- # Once that's done, we need to tweak the compute "service" to
- # make sure it returns unique objects. We do this inside the
- # mock context to avoid a small window between the end of the
- # context and the tweaking where get_connection would revert to
- # being an autospec mock.
- compute.driver._host.get_connection = lambda: fake_connection
+ orig_con = self.mock_conn.return_value
+ self.mock_conn.return_value = fake_connection
+ compute = self.start_service(
+ 'compute', host=hostname, cell_name=cell_name)
+ # Once that's done, we need to tweak the compute "service" to
+ # make sure it returns unique objects.
+ compute.driver._host.get_connection = lambda: fake_connection
+ # Then we revert the local mock tweaking so the next compute can
+ # get its own
+ self.mock_conn.return_value = orig_con
return compute
# ensure we haven't already registered services with these hostnames
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
- self.computes[hostname] = _start_compute(hostname, host_info)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ self.computes[hostname] = _start_compute(hostname, host_info)
+ # We need to trigger libvirt.Host() to capture the node-local
+ # uuid while we have it mocked out.
+ self.computes[hostname].driver._host.get_node_uuid()
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
@@ -159,6 +192,74 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
return hostname
+ def restart_compute_service(
+ self,
+ hostname,
+ host_info=None,
+ pci_info=None,
+ mdev_info=None,
+ vdpa_info=None,
+ libvirt_version=None,
+ qemu_version=None,
+ keep_hypervisor_state=True,
+ ):
+ """Stops the service and starts a new one to have realistic restart
+
+ :param hostname: the hostname of the nova-compute service to be
+ restarted
+ :param keep_hypervisor_state: If True then we reuse the fake connection
+ from the existing driver. If False a new connection will be created
+ based on the other parameters provided
+ """
+ # We are intentionally not calling super() here. Nova's base test class
+        # defines starting and restarting the compute service with very
+        # different signatures, and those calls cannot be made aware of
+ # the intricacies of the libvirt fixture. So we simply hide that
+ # implementation.
+
+ if keep_hypervisor_state and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either keep_hypervisor_state=True or a list of libvirt "
+ "parameters can be provided but not both"
+ )
+
+ compute = self.computes.pop(hostname)
+ self.compute_rp_uuids.pop(hostname)
+
+ # NOTE(gibi): The service interface cannot be used to simulate a real
+ # service restart as the manager object will not be recreated after a
+ # service.stop() and service.start() therefore the manager state will
+ # survive. For example the resource tracker will not be recreated after
+ # a stop start. The service.kill() call cannot help as it deletes
+ # the service from the DB which is unrealistic and causes that some
+ # operation that refers to the killed host (e.g. evacuate) fails.
+ # So this helper method will stop the original service and then starts
+ # a brand new compute service for the same host and node. This way
+ # a new ComputeManager instance will be created and initialized during
+ # the service startup.
+ compute.stop()
+
+ # this service was running previously, so we have to make sure that
+ # we restart it in the same cell
+ cell_name = self.host_mappings[compute.host].cell_mapping.name
+
+ old_connection = compute.manager.driver._get_connection()
+
+ self.start_compute(
+ hostname, host_info, pci_info, mdev_info, vdpa_info,
+ libvirt_version, qemu_version, cell_name,
+ old_connection if keep_hypervisor_state else None
+ )
+
+ return self.computes[hostname]
+
class LibvirtMigrationMixin(object):
"""A simple mixin to facilliate successful libvirt live migrations
@@ -316,6 +417,21 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
'binding:vif_type': 'ovs',
'binding:vnic_type': 'normal',
}
+ network_3_port_2 = {
+ 'id': '132c3875-b175-4b20-8a57-7a76219a13ae',
+ 'network_id': network_3['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'd2:0b:fd:99:89:8b',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.2.7',
+ 'subnet_id': subnet_3['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
network_4_port_1 = {
'id': 'b4cd0b93-2ac8-40a7-9fa4-2cd680ccdf3e',
'network_id': network_4['id'],
@@ -361,6 +477,37 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
'binding:vif_type': 'hw_veb',
'binding:vnic_type': 'direct',
}
+ network_4_port_4 = {
+ 'id': 'a31e381d-41ec-41e4-b5a5-ec4ef705fafa',
+ 'network_id': network_1['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': '71:ce:c7:2b:cd:dd',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.9',
+ 'subnet_id': subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
+
+ network_4_port_pf = {
+ 'id': 'c6f51315-9202-416f-9e2f-eb78b3ac36d9',
+ 'network_id': network_4['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'b5:bc:2e:e7:51:01',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.8',
+ 'subnet_id': subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {'vlan': 42},
+ 'binding:vif_type': 'hostdev_physical',
+ 'binding:vnic_type': 'direct-physical',
+ }
def __init__(self, test):
super(LibvirtNeutronFixture, self).__init__(test)
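A note on getattr(uuids, 'node_%s' % hostname) in the compute-start changes
above: oslo's uuidsentinel hands out a stable UUID per attribute name, giving
each host a deterministic fake node UUID. For instance, assuming oslo.utils
is installed:

    from oslo_utils.fixture import uuidsentinel as uuids

    # The same attribute always yields the same UUID within a process...
    assert uuids.node_compute1 == getattr(uuids, 'node_compute1')
    # ...and different attributes yield different, valid UUIDs.
    assert uuids.node_compute1 != uuids.node_compute2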
diff --git a/nova/tests/functional/libvirt/test_device_bus_migration.py b/nova/tests/functional/libvirt/test_device_bus_migration.py
new file mode 100644
index 0000000000..3852e31c68
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_device_bus_migration.py
@@ -0,0 +1,407 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+from unittest import mock
+
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.cmd import manage
+from nova import context as nova_context
+from nova import objects
+from nova import test
+from nova.tests.functional.libvirt import base
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import driver as libvirt_driver
+
+
+class LibvirtDeviceBusMigration(base.ServersTestBase):
+
+ microversion = 'latest'
+ # needed for move operations
+ ADMIN_API = True
+
+ def setUp(self):
+ super().setUp()
+ self.context = nova_context.get_admin_context()
+ self.compute_hostname = self.start_compute()
+ self.compute = self.computes[self.compute_hostname]
+ self.commands = manage.ImagePropertyCommands()
+
+ def _unset_stashed_image_properties(self, server_id, properties):
+ instance = objects.Instance.get_by_uuid(self.context, server_id)
+ for p in properties:
+ instance.system_metadata.pop(f'image_{p}')
+ instance.save()
+
+ def _assert_stashed_image_properties(self, server_id, properties):
+ instance = objects.Instance.get_by_uuid(self.context, server_id)
+ for p, value in properties.items():
+ self.assertEqual(instance.system_metadata.get(f'image_{p}'), value)
+
+ def _assert_stashed_image_properties_persist(self, server, properties):
+ # Assert the stashed properties persist across a host reboot
+ self.restart_compute_service(self.compute_hostname)
+ self._assert_stashed_image_properties(server['id'], properties)
+
+ # Assert the stashed properties persist across a guest reboot
+ self._reboot_server(server, hard=True)
+ self._assert_stashed_image_properties(server['id'], properties)
+
+ # Assert the stashed properties persist across a migration
+ if 'other_compute' not in self.computes:
+ self.start_compute('other_compute')
+ # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
+ # probably be less...dumb
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ self._migrate_server(server)
+ self._confirm_resize(server)
+ self._assert_stashed_image_properties(server['id'], properties)
+
+ def test_default_image_property_registration(self):
+ """Assert that the defaults for various hw image properties don't
+ change over the lifecycle of an instance.
+ """
+ default_image_properties = {
+ 'hw_machine_type': 'pc',
+ 'hw_cdrom_bus': 'ide',
+ 'hw_disk_bus': 'virtio',
+ 'hw_input_bus': 'usb',
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_video_model': 'virtio',
+ 'hw_vif_model': 'virtio',
+ }
+
+ server = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server['id'], default_image_properties)
+
+ # Unset the defaults here to ensure that init_host resets them
+ # when the compute restarts the libvirt driver
+ self._unset_stashed_image_properties(
+ server['id'], libvirt_driver.REGISTER_IMAGE_PROPERTY_DEFAULTS)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration
+ self._assert_stashed_image_properties_persist(
+ server, default_image_properties)
+
+ def test_non_default_image_property_registration(self):
+ """Assert that non-default values for various hw image properties
+ don't change over the lifecycle of an instance.
+ """
+ non_default_image_properties = {
+ 'hw_machine_type': 'q35',
+ 'hw_cdrom_bus': 'sata',
+ 'hw_disk_bus': 'sata',
+ 'hw_input_bus': 'virtio',
+ 'hw_video_model': 'qxl',
+ 'hw_vif_model': 'e1000',
+ }
+ self.glance.create(
+ None,
+ {
+ 'id': uuids.hw_bus_model_image_uuid,
+ 'name': 'hw_bus_model_image',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ 'size': '74185822',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': [],
+ 'properties': non_default_image_properties,
+ }
+ )
+ server = self._create_server(
+ networks='none', image_uuid=uuids.hw_bus_model_image_uuid)
+ self._assert_stashed_image_properties(
+ server['id'], non_default_image_properties)
+
+ # Assert the non defaults persist across a host reboot, guest reboot,
+ # and guest migration
+ self._assert_stashed_image_properties_persist(
+ server, non_default_image_properties)
+
+ def test_default_image_property_persists_across_osinfo_changes(self):
+ # Create a server with default image properties
+ default_image_properties = {
+ 'hw_vif_model': 'virtio',
+ 'hw_disk_bus': 'virtio',
+ }
+ server = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server['id'], default_image_properties)
+
+ with test.nested(
+ mock.patch('nova.virt.osinfo.HardwareProperties.network_model',
+ new=mock.PropertyMock()),
+ mock.patch('nova.virt.osinfo.HardwareProperties.disk_model',
+ new=mock.PropertyMock())
+ ) as (mock_nw_model, mock_disk_model):
+ # osinfo returning new things
+ mock_nw_model.return_value = 'e1000'
+ mock_disk_model.return_value = 'sata'
+
+ # Assert the defaults persist across a host reboot, guest reboot,
+ # and guest migration
+ self._assert_stashed_image_properties_persist(
+ server, default_image_properties)
+
+ def test_default_image_property_persists_across_host_flag_changes(self):
+ # Set the default to ps2 via host flag
+ self.flags(pointer_model='ps2mouse')
+ # Restart compute to pick up ps2 setting, which means the guest will
+ # not get a prescribed pointer device
+ self.restart_compute_service(self.compute_hostname)
+
+ # Create a server with default image properties
+ default_image_properties1 = {
+ 'hw_pointer_model': None,
+ 'hw_input_bus': None,
+ }
+ server1 = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server1['id'], default_image_properties1)
+
+ # Assert the defaults persist across a host flag change
+ self.flags(pointer_model='usbtablet')
+ # Restart compute to pick up usb setting
+ self.restart_compute_service(self.compute_hostname)
+ self._assert_stashed_image_properties(
+ server1['id'], default_image_properties1)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration
+ self._assert_stashed_image_properties_persist(
+ server1, default_image_properties1)
+
+        # Create a server with the new default image properties that apply
+        # since the host flag change
+ default_image_properties2 = {
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_input_bus': 'usb',
+ }
+ server2 = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server2['id'], default_image_properties2)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration
+ self._assert_stashed_image_properties_persist(
+ server2, default_image_properties2)
+
+ # Finally, try changing the host flag again to None. Note that it is
+ # not possible for a user to specify None for this option:
+ # https://bugs.launchpad.net/nova/+bug/1866106
+ self.flags(pointer_model=None)
+ # Restart compute to pick up None setting
+ self.restart_compute_service(self.compute_hostname)
+ self._assert_stashed_image_properties(
+ server1['id'], default_image_properties1)
+ self._assert_stashed_image_properties(
+ server2['id'], default_image_properties2)
+
+        # Create a server after the host flag change to None. The defaults
+ # should be the same as for ps2mouse
+ server3 = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server3['id'], default_image_properties1)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration for server1, server2, and server3
+ self._assert_stashed_image_properties_persist(
+ server1, default_image_properties1)
+ self._assert_stashed_image_properties_persist(
+ server2, default_image_properties2)
+ self._assert_stashed_image_properties_persist(
+ server3, default_image_properties1)
+
+ def _assert_guest_config(self, config, image_properties):
+ verified_properties = set()
+
+ # Verify the machine type matches the image property
+ value = image_properties.get('hw_machine_type')
+ if value:
+ self.assertEqual(value, config.os_mach_type)
+ verified_properties.add('hw_machine_type')
+
+ # Look at all the devices and verify that their bus and model values
+ # match the desired image properties
+ for device in config.devices:
+ if isinstance(device, vconfig.LibvirtConfigGuestDisk):
+ if device.source_device == 'cdrom':
+ value = image_properties.get('hw_cdrom_bus')
+ if value:
+ self.assertEqual(value, device.target_bus)
+ verified_properties.add('hw_cdrom_bus')
+
+ if device.source_device == 'disk':
+ value = image_properties.get('hw_disk_bus')
+ if value:
+ self.assertEqual(value, device.target_bus)
+ verified_properties.add('hw_disk_bus')
+
+ if isinstance(device, vconfig.LibvirtConfigGuestInput):
+ value = image_properties.get('hw_input_bus')
+ if value:
+ self.assertEqual(value, device.bus)
+ verified_properties.add('hw_input_bus')
+
+ if device.type == 'tablet':
+ value = image_properties.get('hw_pointer_model')
+ if value:
+ self.assertEqual('usbtablet', value)
+ verified_properties.add('hw_pointer_model')
+
+ if isinstance(device, vconfig.LibvirtConfigGuestVideo):
+ value = image_properties.get('hw_video_model')
+ if value:
+ self.assertEqual(value, device.type)
+ verified_properties.add('hw_video_model')
+
+ if isinstance(device, vconfig.LibvirtConfigGuestInterface):
+ value = image_properties.get('hw_vif_model')
+ if value:
+ self.assertEqual(value, device.model)
+ verified_properties.add('hw_vif_model')
+
+ # If hw_pointer_model or hw_input_bus are in the image properties but
+ # we did not encounter devices for them, they should be None
+ for p in ['hw_pointer_model', 'hw_input_bus']:
+ if p in image_properties and p not in verified_properties:
+ self.assertIsNone(image_properties[p])
+ verified_properties.add(p)
+
+ # Assert that we verified all of the image properties
+ self.assertEqual(
+ len(image_properties), len(verified_properties),
+ f'image_properties: {image_properties}, '
+ f'verified_properties: {verified_properties}'
+ )
+
+ def test_machine_type_and_bus_and_model_migration(self):
+ """Assert the behaviour of the nova-manage image_property set command
+ when used to migrate between machine types and associated device buses.
+ """
+ # Create a pass-through mock around _get_guest_config to capture the
+ # config of an instance so we can assert things about it later.
+ # TODO(lyarwood): This seems like a useful thing to do in the libvirt
+ # func tests for all computes we start?
+ self.guest_configs = {}
+ orig_get_config = self.compute.driver._get_guest_config
+
+ def _get_guest_config(_self, *args, **kwargs):
+ guest_config = orig_get_config(*args, **kwargs)
+ instance = args[0]
+ self.guest_configs[instance.uuid] = guest_config
+ return self.guest_configs[instance.uuid]
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_guest_config',
+ _get_guest_config))
+
+ pc_image_properties = {
+ 'hw_machine_type': 'pc',
+ 'hw_cdrom_bus': 'ide',
+ 'hw_disk_bus': 'sata',
+ 'hw_input_bus': 'usb',
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_video_model': 'cirrus',
+ 'hw_vif_model': 'e1000',
+ }
+ self.glance.create(
+ None,
+ {
+ 'id': uuids.pc_image_uuid,
+ 'name': 'pc_image',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ 'size': '74185822',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': [],
+ 'properties': pc_image_properties,
+ }
+ )
+
+ body = self._build_server(
+ image_uuid=uuids.pc_image_uuid, networks='auto')
+
+ # Add a cdrom to be able to verify hw_cdrom_bus
+ body['block_device_mapping_v2'] = [{
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'disk_bus': 'ide',
+ 'device_type': 'cdrom',
+ 'boot_index': 0,
+ }]
+
+ # Create the server and verify stashed image properties
+ server = self.api.post_server({'server': body})
+ self._wait_for_state_change(server, 'ACTIVE')
+ self._assert_stashed_image_properties(
+ server['id'], pc_image_properties)
+
+ # Verify the guest config matches the image properties
+ guest_config = self.guest_configs[server['id']]
+ self._assert_guest_config(guest_config, pc_image_properties)
+
+ # Set the image properties with nova-manage
+ self._stop_server(server)
+
+ q35_image_properties = {
+ 'hw_machine_type': 'q35',
+ 'hw_cdrom_bus': 'sata',
+ 'hw_disk_bus': 'virtio',
+ 'hw_input_bus': 'virtio',
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_video_model': 'qxl',
+ 'hw_vif_model': 'virtio',
+ }
+ property_list = [
+ f'{p}={value}' for p, value in q35_image_properties.items()
+ ]
+
+ self.commands.set(
+ instance_uuid=server['id'], image_properties=property_list)
+
+ # Verify the updated stashed image properties
+ self._start_server(server)
+ self._assert_stashed_image_properties(
+ server['id'], q35_image_properties)
+
+ # The guest config should reflect the new values except for the cdrom
+ # block device bus which is taken from the block_device_mapping record,
+ # not system_metadata, so it cannot be changed
+ q35_image_properties['hw_cdrom_bus'] = 'ide'
+ guest_config = self.guest_configs[server['id']]
+ self._assert_guest_config(guest_config, q35_image_properties)
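The config-capture trick in the new test file (wrapping _get_guest_config and
stashing its return value keyed by instance UUID) is a general
pass-through-mock pattern; a standalone sketch with invented names:

    captured = {}

    class Driver:
        def build_config(self, name):
            return {'name': name, 'machine': 'pc'}

    _orig = Driver.build_config

    def _capturing(self, name):
        # Call through to the real implementation, but stash the result so
        # a test can assert on it afterwards.
        config = _orig(self, name)
        captured[name] = config
        return config

    Driver.build_config = _capturing
    Driver().build_config('vm1')
    assert captured['vm1']['machine'] == 'pc'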
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 531cefc63c..0e89a3cdb6 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -13,10 +13,10 @@
# under the License.
import collections
-import fixtures
-import mock
import os.path
+from unittest import mock
+import fixtures
from oslo_utils import fileutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
@@ -415,7 +415,9 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
with mock.patch.object(fakelibvirt.Connection, 'getHostname',
return_value=name):
- compute = self.start_service('compute', host=name)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % name))
+ compute = self.start_service('compute', host=name)
compute.driver._host.get_connection().getHostname = lambda: name
return compute
@@ -427,6 +429,7 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
fake_network.set_stub_network_methods(self)
api_fixture = self.useFixture(
diff --git a/nova/tests/functional/libvirt/test_live_migration.py b/nova/tests/functional/libvirt/test_live_migration.py
index f714a5f043..31ff9dfca0 100644
--- a/nova/tests/functional/libvirt/test_live_migration.py
+++ b/nova/tests/functional/libvirt/test_live_migration.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import threading
from lxml import etree
@@ -19,15 +20,18 @@ from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
-class LiveMigrationQueuedAbortTest(
+class LiveMigrationWithLockBase(
libvirt_base.LibvirtMigrationMixin,
libvirt_base.ServersTestBase,
integrated_helpers.InstanceHelperMixin
):
- """Functional test for bug 1949808.
+ """Base for live migration tests which require live migration to be
+    locked for a certain period of time and then unlocked afterwards.
- This test is used to confirm that VM's state is reverted properly
- when queued Live migration is aborted.
+    A separate base class is needed because the locking mechanism could
+    behave unpredictably if two tests from the same class tried to use
+    it simultaneously. Every test using this mechanism should therefore
+    live in its own class.
"""
api_major_version = 'v2.1'
@@ -69,7 +73,15 @@ class LiveMigrationQueuedAbortTest(
dom = conn.lookupByUUIDString(server)
dom.complete_job()
- def test_queued_live_migration_abort(self):
+
+class LiveMigrationQueuedAbortTestVmStatus(LiveMigrationWithLockBase):
+ """Functional test for bug #1949808.
+
+    This test is used to confirm that a VM's state is reverted properly
+    when its queued live migration is aborted.
+ """
+
+ def test_queued_live_migration_abort_vm_status(self):
# Lock live migrations
self.lock_live_migration.acquire()
@@ -105,13 +117,96 @@ class LiveMigrationQueuedAbortTest(
'/servers/%s/migrations/%s' % (self.server_b['id'],
serverb_migration['id']))
self._wait_for_migration_status(self.server_b, ['cancelled'])
- # Unlock live migrations and confirm that server_a becomes
- # active again after successful live migration
+ # Unlock live migrations and confirm that both servers become
+ # active again after successful (server_a) and aborted
+ # (server_b) live migrations
self.lock_live_migration.release()
self._wait_for_state_change(self.server_a, 'ACTIVE')
+ self._wait_for_state_change(self.server_b, 'ACTIVE')
+
+
+class LiveMigrationQueuedAbortTestLeftoversRemoved(LiveMigrationWithLockBase):
+ """Functional test for bug #1960412.
+
+    Placement allocations for the live migration and inactive Neutron
+    port bindings on the destination host, both created by the Nova
+    control plane when a live migration is initiated, should be removed
+    when a queued live migration is aborted via the Nova API.
+ """
+
+ def test_queued_live_migration_abort_leftovers_removed(self):
+ # Lock live migrations
+ self.lock_live_migration.acquire()
+
+        # Start instances: the first one is used to occupy the executor's
+        # live migration queue, the second one to actually confirm that
+        # queued live migrations are aborted properly.
+        # port_1 is created automatically when the neutron fixture is
+        # initialized; port_2 is created manually.
+ self.server_a = self._create_server(
+ host=self.src_hostname,
+ networks=[{'port': self.neutron.port_1['id']}])
+ self.neutron.create_port({'port': self.neutron.port_2})
+ self.server_b = self._create_server(
+ host=self.src_hostname,
+ networks=[{'port': self.neutron.port_2['id']}])
+        # Issue live migration requests for both servers. We expect that
+        # server_a's live migration will be running, but locked by
+        # self.lock_live_migration, and server_b's live migration will be
+        # queued.
+ self._live_migrate(
+ self.server_a,
+ migration_expected_state='running',
+ server_expected_state='MIGRATING'
+ )
+ self._live_migrate(
+ self.server_b,
+ migration_expected_state='queued',
+ server_expected_state='MIGRATING'
+ )
- # FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
- self.assertRaises(
- AssertionError,
- self._wait_for_state_change, self.server_b, 'ACTIVE')
- self._wait_for_state_change(self.server_b, 'MIGRATING')
+ # Abort live migration for server_b
+ migration_server_a = self.api.api_get(
+ '/os-migrations?instance_uuid=%s' % self.server_a['id']
+ ).body['migrations'].pop()
+ migration_server_b = self.api.api_get(
+ '/os-migrations?instance_uuid=%s' % self.server_b['id']
+ ).body['migrations'].pop()
+
+ self.api.api_delete(
+ '/servers/%s/migrations/%s' % (self.server_b['id'],
+ migration_server_b['id']))
+ self._wait_for_migration_status(self.server_b, ['cancelled'])
+ # Unlock live migrations and confirm that both servers become
+ # active again after successful (server_a) and aborted
+ # (server_b) live migrations
+ self.lock_live_migration.release()
+ self._wait_for_state_change(self.server_a, 'ACTIVE')
+ self._wait_for_migration_status(self.server_a, ['completed'])
+ self._wait_for_state_change(self.server_b, 'ACTIVE')
+
+ # Allocations for both successful (server_a) and aborted queued live
+ # migration (server_b) should be removed.
+ allocations_server_a_migration = self.placement.get(
+ '/allocations/%s' % migration_server_a['uuid']
+ ).body['allocations']
+ self.assertEqual({}, allocations_server_a_migration)
+ allocations_server_b_migration = self.placement.get(
+ '/allocations/%s' % migration_server_b['uuid']
+ ).body['allocations']
+ self.assertEqual({}, allocations_server_b_migration)
+
+        # The INACTIVE port binding on the destination host should be removed
+        # when a queued live migration is aborted, so only one port binding
+        # should exist for the port of each server.
+ port_binding_server_a = copy.deepcopy(
+ self.neutron._port_bindings[self.neutron.port_1['id']]
+ )
+ self.assertEqual(1, len(port_binding_server_a))
+ self.assertNotIn('src', port_binding_server_a)
+ port_binding_server_b = copy.deepcopy(
+ self.neutron._port_bindings[self.neutron.port_2['id']]
+ )
+ self.assertEqual(1, len(port_binding_server_b))
+ self.assertNotIn('dest', port_binding_server_b)
diff --git a/nova/tests/functional/libvirt/test_machine_type.py b/nova/tests/functional/libvirt/test_machine_type.py
index 3b496189d0..04c38b7338 100644
--- a/nova/tests/functional/libvirt/test_machine_type.py
+++ b/nova/tests/functional/libvirt/test_machine_type.py
@@ -103,7 +103,7 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
self.computes['compute1'].stop()
self._unset_machine_type(server_without['id'])
- self.flags(hw_machine_type='x86_64=pc-q35-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-q35-2.4', group='libvirt')
# Restart the compute
self.computes['compute1'].start()
@@ -115,9 +115,9 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
# is able to pass. This just keeps the tests clean.
self._reboot_server(server_without, hard=True)
- # Assert server_without now has a machine type of pc-q35-1.2.3 picked
+ # Assert server_without now has a machine type of pc-q35-2.4 picked
# up from [libvirt]hw_machine_type during init_host
- self._assert_machine_type(server_without['id'], 'pc-q35-1.2.3')
+ self._assert_machine_type(server_without['id'], 'pc-q35-2.4')
def test_machine_type_after_config_change(self):
"""Assert new instances pick up a new default machine type after the
@@ -129,11 +129,11 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
self._assert_machine_type(server_with['id'], 'q35')
self._assert_machine_type(server_without['id'], 'pc')
- self.flags(hw_machine_type='x86_64=pc-q35-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-q35-2.4', group='libvirt')
server_with_new, server_without_new = self._create_servers()
self._assert_machine_type(server_with_new['id'], 'q35')
- self._assert_machine_type(server_without_new['id'], 'pc-q35-1.2.3')
+ self._assert_machine_type(server_without_new['id'], 'pc-q35-2.4')
def test_machine_type_after_server_rebuild(self):
"""Assert that the machine type of an instance changes with a full
@@ -202,26 +202,26 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
)
def test_machine_type_update_stopped(self):
- self.flags(hw_machine_type='x86_64=pc-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-1.2', group='libvirt')
server = self._create_server(networks='none')
- self._assert_machine_type(server['id'], 'pc-1.2.3')
+ self._assert_machine_type(server['id'], 'pc-1.2')
self._stop_server(server)
machine_type_utils.update_machine_type(
self.context,
server['id'],
- 'pc-1.2.4'
+ 'pc-1.2'
)
self._start_server(server)
- self._assert_machine_type(server['id'], 'pc-1.2.4')
+ self._assert_machine_type(server['id'], 'pc-1.2')
def test_machine_type_update_blocked_active(self):
- self.flags(hw_machine_type='x86_64=pc-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-1.2', group='libvirt')
server = self._create_server(networks='none')
- self._assert_machine_type(server['id'], 'pc-1.2.3')
+ self._assert_machine_type(server['id'], 'pc-1.2')
self.assertRaises(
exception.InstanceInvalidState,
@@ -247,10 +247,10 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
)
def test_machine_type_update_blocked_between_versioned_and_alias(self):
- self.flags(hw_machine_type='x86_64=pc-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-1.2', group='libvirt')
server = self._create_server(networks='none')
- self._assert_machine_type(server['id'], 'pc-1.2.3')
+ self._assert_machine_type(server['id'], 'pc-1.2')
self._stop_server(server)
self.assertRaises(
@@ -372,7 +372,7 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
)
# Change the actual config on the compute
- self.flags(hw_machine_type='x86_64=pc-q35-1.2', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-q35-2.4', group='libvirt')
# Assert the existing instances remain the same after being rebooted or
# unshelved, rebuilding their domain configs
@@ -389,4 +389,4 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
# Assert that new instances are spawned with the expected machine types
server_with_new, server_without_new = self._create_servers()
self._assert_machine_type(server_with_new['id'], 'q35')
- self._assert_machine_type(server_without_new['id'], 'pc-q35-1.2')
+ self._assert_machine_type(server_without_new['id'], 'pc-q35-2.4')
diff --git a/nova/tests/functional/libvirt/test_numa_live_migration.py b/nova/tests/functional/libvirt/test_numa_live_migration.py
index 2f3897d6b2..0e504d2df2 100644
--- a/nova/tests/functional/libvirt/test_numa_live_migration.py
+++ b/nova/tests/functional/libvirt/test_numa_live_migration.py
@@ -206,10 +206,8 @@ class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked
if pin_dest:
@@ -333,10 +331,8 @@ class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked. This is a
# rollback test, so server_a is expected to remain on host_a.
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index fd09a11e20..5b73e1b965 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import testtools
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+import testtools
import nova
from nova.compute import manager
@@ -346,6 +346,76 @@ class NUMAServersTest(NUMAServersTestBase):
# There shouldn't be any hosts available to satisfy this request
self._run_build_test(flavor_id, end_status='ERROR')
+ def test_create_server_with_mixed_policy_asymmetric_multi_numa(self):
+ """Boot an instance stretched to two NUMA nodes requesting only
+        shared CPUs in one NUMA node and only dedicated CPUs in the other.
+ """
+ # shared dedicated
+ # NUMA0 pCPU | 0 | 2 3
+ # NUMA1 pCPU | | 6 7
+ self.flags(
+ cpu_shared_set='0',
+ cpu_dedicated_set='2,3,6,7',
+ group='compute',
+ )
+ self.flags(vcpu_pin_set=None)
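+        # vcpu_pin_set is the legacy option superseded by cpu_shared_set and
+        # cpu_dedicated_set; it is cleared here so the new-style options
+        # above take effect.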
+
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=4, cpu_threads=1)
+ self.start_compute(host_info=host_info, hostname='compute1')
+
+ # sanity check the created host topology object; this is really just a
+ # test of the fakelibvirt module
+ host_numa = objects.NUMATopology.obj_from_db_obj(
+ objects.ComputeNode.get_by_nodename(
+ self.ctxt, 'compute1',
+ ).numa_topology
+ )
+ self.assertEqual(2, len(host_numa.cells))
+ self.assertEqual({0}, host_numa.cells[0].cpuset)
+ self.assertEqual({2, 3}, host_numa.cells[0].pcpuset)
+
+ self.assertEqual(set(), host_numa.cells[1].cpuset)
+ self.assertEqual({6, 7}, host_numa.cells[1].pcpuset)
+
+ # create a flavor with 1 shared and 2 dedicated CPUs stretched to
+ # different NUMA nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'mixed',
+ 'hw:cpu_dedicated_mask': '^0',
+ 'hw:numa_nodes': '2',
+ 'hw:numa_cpus.0': '0',
+ 'hw:numa_cpus.1': '1,2',
+ 'hw:numa_mem.0': '256',
+ 'hw:numa_mem.1': '768',
+ }
+ flavor_id = self._create_flavor(
+ vcpu=3, memory_mb=1024, extra_spec=extra_spec)
+ expected_usage = {
+ 'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+ }
+ # The only possible solution (ignoring the order of vCPU1,2):
+ # vCPU 0 => pCPU 0, NUMA0, shared
+ # vCPU 1 => pCPU 6, NUMA1, dedicated
+ # vCPU 2 => pCPU 7, NUMA1, dedicated
+ server = self._run_build_test(
+ flavor_id, expected_usage=expected_usage)
+
+ # sanity check the instance topology
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ self.assertEqual(2, len(inst.numa_topology.cells))
+
+ self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+ self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+ self.assertIsNone(inst.numa_topology.cells[0].cpu_pinning)
+
+ self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+ self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+ self.assertEqual(
+ {6, 7},
+ set(inst.numa_topology.cells[1].cpu_pinning.values())
+ )
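+        # The exact pinning of vCPU1 and vCPU2 to pCPU6 and pCPU7 may vary,
+        # hence only the set of pinned pCPUs is asserted above.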
+
def test_create_server_with_dedicated_policy_old_configuration(self):
"""Create a server using the legacy extra spec and configuration.
@@ -731,7 +801,7 @@ class NUMAServersTest(NUMAServersTestBase):
for host, compute_rp_uuid in self.compute_rp_uuids.items():
if host == original_host:
# the host that had the instance should no longer have
- # alocations since the resize has been confirmed
+ # allocations since the resize has been confirmed
expected_usage = {'VCPU': 0, 'PCPU': 0, 'DISK_GB': 0,
'MEMORY_MB': 0}
else:
@@ -1187,10 +1257,8 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
self.flags(cpu_dedicated_set='0-7', group='compute')
self.flags(vcpu_pin_set=None)
- computes = {}
- for host, compute in self.computes.items():
- computes[host] = self.restart_compute_service(compute)
- self.computes = computes
+ for host in list(self.computes.keys()):
+ self.restart_compute_service(host)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_pci_in_placement.py b/nova/tests/functional/libvirt/test_pci_in_placement.py
new file mode 100644
index 0000000000..41d6c8e008
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_pci_in_placement.py
@@ -0,0 +1,1997 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+import ddt
+import fixtures
+import os_resource_classes
+import os_traits
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from nova import exception
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_pci_sriov_servers
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class PlacementPCIReportingTests(test_pci_sriov_servers._PCIServersTestBase):
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+ PF_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PF_PROD_ID}"
+ VF_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.VF_PROD_ID}"
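+    # These follow the CUSTOM_PCI_<vendor_id>_<product_id> naming scheme
+    # used for devices without an explicit resource_class in the device
+    # spec.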
+
+ # Just placeholders to satisfy the base class. The real value will be
+ # redefined by the tests
+ PCI_DEVICE_SPEC = []
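+    # The aliases below are referenced via the pci_passthrough:alias
+    # flavor extra spec in the tests, e.g. "a-pci-dev:1".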
+ PCI_ALIAS = [
+ jsonutils.dumps(x)
+ for x in (
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-pci-dev",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "device_type": "type-PF",
+ "name": "a-pf",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "device_type": "type-VF",
+ "name": "a-vf",
+ },
+ )
+ ]
+
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+
+ # These tests should not depend on the host's sysfs
+ self.useFixture(
+ fixtures.MockPatch('nova.pci.utils.is_physical_function'))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False)
+ )
+ )
+
+
+class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
+
+ def test_new_compute_init_with_pci_devs(self):
+ """A brand new compute is started with multiple pci devices configured
+ for nova.
+ """
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with two type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=4)
+
+ # the emulated devices will then be filtered by the device_spec:
+ device_spec = self._to_list_of_json_str(
+ [
+ # PCI_PROD_ID will match two type-PCI devs (slot 0, 1)
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "traits": ",".join(
+ [os_traits.HW_GPU_API_VULKAN, "CUSTOM_GPU", "purple"]
+ )
+ },
+                # PF_PROD_ID + slot 2 will match one PF but not its child
+                # VFs
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "CUSTOM_PF", "pf-white"]
+ ),
+ },
+ # VF_PROD_ID + slot 3 will match two VFs but not their parent
+ # PF
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "CUSTOM_VF", "vf-red"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ # Finally we assert that only the filtered devices are reported to
+ # placement.
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ # Note that the VF inventory is reported on the parent PF
+ "0000:81:03.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_GPU",
+ "CUSTOM_PURPLE",
+ ],
+ "0000:81:01.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_GPU",
+ "CUSTOM_PURPLE",
+ ],
+ "0000:81:02.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF",
+ "CUSTOM_PF_WHITE",
+ ],
+ "0000:81:03.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF",
+ "CUSTOM_VF_RED",
+ ],
+ },
+ )
+
+ def test_new_compute_init_with_pci_dev_custom_rc(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PCI dev in slot 0
+ # * one type-PF dev in slot 1 with a single type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=1)
+
+ device_spec = self._to_list_of_json_str(
+ [
+ # PCI_PROD_ID will match the type-PCI in slot 0
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "resource_class": os_resource_classes.PGPU,
+ "traits": os_traits.HW_GPU_API_VULKAN,
+ },
+ # slot 1 func 0 is the type-PF dev. The child VF is ignored
+ {
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:01.0",
+ "resource_class": "crypto",
+ "traits": "to-the-moon,hodl"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {os_resource_classes.PGPU: 1},
+ "0000:81:01.0": {"CUSTOM_CRYPTO": 1},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_TO_THE_MOON",
+ "CUSTOM_HODL",
+ ],
+ },
+ )
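+        # Note how the lower case resource_class ("crypto") and trait values
+        # from the device spec are normalized to upper case CUSTOM_* symbols
+        # in placement.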
+
+    def test_dependent_device_config_is_rejected(self):
+        """Configuring both the PF and its child VFs is not supported.
+        Only one of them can be given to nova.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with a single type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+        # both devices will be matched by our config
+ device_spec = self._to_list_of_json_str(
+ [
+ # PF
+ {
+ "address": "0000:81:00.0"
+ },
+ # Its child VF
+ {
+ "address": "0000:81:00.1"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported",
+ str(ex)
+ )
+
+ def test_sibling_vfs_with_contradicting_resource_classes_rejected(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF dev in slot 0 with two type-VFs under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches the two VFs separately and tries to configure
+        # them with different resource classes
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.1",
+ "resource_class": "vf1"
+ },
+ {
+ "address": "0000:81:00.2",
+ "resource_class": "vf2"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciMixedResourceClassException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "VFs from the same PF cannot be configured with different "
+ "'resource_class' values in [pci]device_spec. We got "
+ "CUSTOM_VF2 for 0000:81:00.2 and CUSTOM_VF1 for 0000:81:00.1.",
+ str(ex)
+ )
+
+ def test_sibling_vfs_with_contradicting_traits_rejected(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF dev in slot 0 with two type-VFs under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches the two VFs separately and tries to configure
+        # them with different trait lists
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.1",
+ "traits": "foo",
+ },
+ {
+ "address": "0000:81:00.2",
+ "traits": "bar",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR for 0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_FOO for 0000:81:00.1.",
+ str(ex)
+ )
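+        # Note that COMPUTE_MANAGED_PCI_DEVICE appears in both trait sets in
+        # the message above even though the config only provided "foo" and
+        # "bar"; it is seemingly added to every configured device
+        # automatically.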
+
+ def test_neutron_sriov_devs_ignored(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with one type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # then the config assigns physnet to the dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "physical_network": "physnet0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+        # As every matching dev has a physnet configured, they are ignored
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_devname_based_dev_spec_rejected(self):
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "devname": "eth0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.start_compute,
+ hostname="compute1",
+ )
+ self.assertIn(
+ " Invalid [pci]device_spec configuration. PCI Placement reporting "
+ "does not support 'devname' based device specification but we got "
+ "{'devname': 'eth0'}. Please use PCI address in the configuration "
+ "instead.",
+ str(ex)
+ )
+
+ def test_remove_pci(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches that PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # now un-configure the PCI device and restart the compute
+ self.flags(group='pci', device_spec=self._to_list_of_json_str([]))
+ self.restart_compute_service(hostname="compute1")
+
+ # the RP had no allocation so nova could remove it
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_remove_one_vf(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+        # then the config matches the VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove one of the VFs from the hypervisor and then restart the
+ # compute
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # total value is expected to decrease to 1
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_remove_all_vfs(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+        # then the config matches the VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove both VFs from the hypervisor and restart the compute
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=0)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that the RP is deleted
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_remove_all_vfs_add_pf(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config matches both VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # change the config to match the PF but do not match the VFs and
+ # restart the compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that VF inventory is removed and the PF inventory is added
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_remove_pf_add_vfs(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config only matches the PF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove the PF from the config and add the VFs instead then restart
+ # the compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that PF inventory is removed and the VF inventory is added
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_device_reconfiguration(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with two type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # from slot 0 we match the PF only and ignore the VFs
+ # from slot 1 we match the VFs but ignore the parent PF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "CUSTOM_PF", "pf-white"]
+ ),
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:01.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "CUSTOM_VF", "vf-red"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ "0000:81:01.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF",
+ "CUSTOM_PF_WHITE",
+ ],
+ "0000:81:01.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF",
+ "CUSTOM_VF_RED",
+ ],
+ },
+ )
+
+ # change the resource class and traits configuration and restart the
+ # compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "resource_class": "CUSTOM_PF",
+ "address": "0000:81:00.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "pf-black"]
+ ),
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "CUSTOM_VF",
+ "address": "0000:81:01.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "vf-blue", "foobar"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {"CUSTOM_PF": 1},
+ "0000:81:01.0": {"CUSTOM_VF": 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF_BLACK",
+ ],
+ "0000:81:01.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF_BLUE",
+ "CUSTOM_FOOBAR",
+ ],
+ },
+ )
+
+ def _create_one_compute_with_a_pf_consumed_by_an_instance(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, with one type-VF
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # we match the PF only and ignore the VF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:00.0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming the PF
+ extra_spec = {"pci_passthrough:alias": "a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {self.PF_RC: 1},
+ }
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.PF_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
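+        # Running the update_available_resources periodic and re-asserting
+        # proves the reported PCI view is stable across periodic updates.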
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ return server, compute1_expected_placement_view
+
+ def test_device_reconfiguration_with_allocations_config_change_warn(self):
+ server, compute1_expected_placement_view = (
+ self._create_one_compute_with_a_pf_consumed_by_an_instance())
+
+ # remove 0000:81:00.0 from the device spec and restart the compute
+ device_spec = self._to_list_of_json_str([])
+ self.flags(group='pci', device_spec=device_spec)
+        # The PF is in use but removed from the config. The PciTracker warns
+        # but keeps the device, so the placement logic mimics this and only
+        # warns while keeping the RP and the allocation in placement intact.
+ self.restart_compute_service(hostname="compute1")
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # the warning from the PciTracker
+ self.assertIn(
+ "WARNING [nova.pci.manager] Unable to remove device with status "
+ "'allocated' and ownership %s because of PCI device "
+ "1:0000:81:00.0 is allocated instead of ['available', "
+ "'unavailable', 'unclaimable']. Check your [pci]device_spec "
+ "configuration to make sure this allocated device is whitelisted. "
+ "If you have removed the device from the whitelist intentionally "
+ "or the device is no longer available on the host you will need "
+ "to delete the server or migrate it to another host to silence "
+ "this warning."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+ # the warning from the placement PCI tracking logic
+ self.assertIn(
+ "WARNING [nova.compute.pci_placement_translator] Device spec is "
+ "not found for device 0000:81:00.0 in [pci]device_spec. We are "
+ "skipping this devices during Placement update. The device is "
+ "allocated by %s. You should not remove an allocated device from "
+ "the configuration. Please restore the configuration or cold "
+ "migrate the instance to resolve the inconsistency."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+
+ def test_device_reconfiguration_with_allocations_config_change_stop(self):
+ self._create_one_compute_with_a_pf_consumed_by_an_instance()
+
+ # switch 0000:81:00.0 PF to 0000:81:00.1 VF
+ # in the config, then restart the compute service
+
+ # only match the VF now
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.1",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+        # The compute fails to start as the new config would mean that the
+        # PF inventory is removed from the 0000:81:00.0 RP and the VF
+        # inventory is added there instead, but the PF inventory has
+        # allocations. Keeping the old inventory as in
+        # test_device_reconfiguration_with_allocations_config_change_warn is
+        # not an option as it would result in two resource classes on the
+        # same RP, one for the PF and one for the VF. That would allow
+        # consuming the same physical device twice. Such dependent device
+        # configuration is intentionally not supported so we are stopping
+        # the compute service.
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.restart_compute_service,
+ hostname="compute1"
+ )
+ self.assertRegex(
+ str(ex),
+ "Failed to gather or report PCI resources to Placement: There was "
+ "a conflict when trying to complete your request.\n\n "
+ "update conflict: Inventory for 'CUSTOM_PCI_8086_1528' on "
+ "resource provider '.*' in use.",
+ )
+
+ def test_device_reconfiguration_with_allocations_hyp_change(self):
+ server, compute1_expected_placement_view = (
+ self._create_one_compute_with_a_pf_consumed_by_an_instance())
+
+ # restart the compute but simulate that the device 0000:81:00.0 is
+        # removed from the hypervisor while the device spec config is left
+ # intact. The PciTracker will notice this and log a warning. The
+ # placement tracking logic simply keeps the allocation intact in
+        # placement as both the PciDevice and the DeviceSpec are available.
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=0, num_vfs=0)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # the warning from the PciTracker
+ self.assertIn(
+ "WARNING [nova.pci.manager] Unable to remove device with status "
+ "'allocated' and ownership %s because of PCI device "
+ "1:0000:81:00.0 is allocated instead of ['available', "
+ "'unavailable', 'unclaimable']. Check your [pci]device_spec "
+ "configuration to make sure this allocated device is whitelisted. "
+ "If you have removed the device from the whitelist intentionally "
+ "or the device is no longer available on the host you will need "
+ "to delete the server or migrate it to another host to silence "
+ "this warning."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+
+ def test_reporting_disabled_nothing_is_reported(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # Disable placement reporting so even if there are PCI devices on the
+ # hypervisor matching the [pci]device_spec config they are not reported
+ # to Placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+    def test_reporting_cannot_be_disabled_once_it_is_enabled(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # Try to disable placement reporting. The compute will refuse to start
+ # as there are already PCI device RPs in placement.
+ self.flags(group="pci", report_in_placement=False)
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.restart_compute_service,
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False,
+ )
+ self.assertIn(
+ "The [pci]report_in_placement is False but it was enabled before "
+ "on this compute. Nova does not support disabling it after it is "
+ "enabled.",
+ str(ex)
+ )
+
+
+class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ # Make migration succeed
+ self.useFixture(
+ fixtures.MockPatch(
+ "nova.virt.libvirt.driver.LibvirtDriver."
+ "migrate_disk_and_power_off",
+ new=mock.Mock(return_value='{}'),
+ )
+ )
+
+ def test_heal_single_pci_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute *without* PCI tracking in placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+
+        # Create an instance that consumes our PCI dev
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+
+ # Restart the compute but now with PCI tracking enabled
+ self.flags(group="pci", report_in_placement=True)
+ self.restart_compute_service("compute1")
+ # Assert that the PCI allocation is healed in placement
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 1}
+ },
+ "allocations": {
+ server['id']: {
+ "0000:81:00.0": {self.PCI_RC: 1}
+ }
+ }
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # run an update_available_resources periodic and assert that the usage
+ # and allocation stays
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_multiple_allocations(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with 4 type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=8)
+ # the config matches:
+ device_spec = self._to_list_of_json_str(
+ [
+ # both type-PCI
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ # the PF in slot 2
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ },
+ # the VFs in slot 3
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute *without* PCI tracking in placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ # 2 PCI + 1 PF + 4 VFs
+ self.assertPCIDeviceCounts("compute1", total=7, free=7)
+
+ # Create three instances consuming devices:
+ # * server_2pci: two type-PCI
+ # * server_pf_vf: one PF and one VF
+ # * server_2vf: two VFs
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2pci = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=5)
+
+ extra_spec = {"pci_passthrough:alias": "a-pf:1,a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_pf_vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=3)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=1)
+
+ # Restart the compute but now with PCI tracking enabled
+ self.flags(group="pci", report_in_placement=True)
+ self.restart_compute_service("compute1")
+ # Assert that the PCI allocation is healed in placement
+ self.assertPCIDeviceCounts("compute1", total=7, free=1)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 4},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ "0000:81:03.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 3},
+ },
+ "allocations": {
+ server_2pci['id']: {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ server_pf_vf['id']: {
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 1},
+ },
+ server_2vf['id']: {
+ "0000:81:03.0": {self.VF_RC: 2}
+ },
+ },
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # run an update_available_resources periodic and assert that the usage
+ # and allocation stays
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_partial_allocations(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with 4 type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=8)
+ # the config matches:
+ device_spec = self._to_list_of_json_str(
+ [
+ # both type-PCI
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ # the PF in slot 2
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ },
+ # the VFs in slot 3
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ # 2 PCI + 1 PF + 4 VFs
+ self.assertPCIDeviceCounts("compute1", total=7, free=7)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 4},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ "0000:81:03.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PF_RC: 0},
+ "0000:81:03.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # Create an instance consuming a VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=6)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ expected_placement_view["usages"]["0000:81:03.0"][self.VF_RC] = 1
+ expected_placement_view["allocations"][server_vf["id"]] = {
+ "0000:81:03.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # Create another instance consuming two VFs
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=4)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ expected_placement_view["usages"]["0000:81:03.0"][self.VF_RC] = 3
+ expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:03.0": {self.VF_RC: 2}
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_partial_allocations_during_resize_downsize(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF (slot 0) with 2 type-VFs
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming two VFs
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=2, free=0)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 2
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 2}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Resize server to use only one VF
+
+ # Start a new compute with only one VF available
+ # The fake libvirt will emulate on the host:
+        # * one type-PF (slot 0) with 1 type-VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # the config matches just the VFs
+ compute2_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute2_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=1, free=1)
+ compute2_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+
+ self.assertPCIDeviceCounts("compute2", total=1, free=0)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler on the
+ # destination. BUT the resource tracker in the compute will heal the
+ # missing PCI allocation
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # the resize is not confirmed, so we expect that the source host
+ # still has PCI allocation in placement, but it is held by the
+ # migration UUID now.
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"], server['id'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # revert the resize
+ server = self._revert_resize(server)
+ # the dest host should be freed up
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute2_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # on the source host the allocation should be moved back from the
+ # migration UUID to the instance UUID
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"],
+ server['id'],
+ revert=True
+ )
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # resize again and this time confirm the resize
+ server = self._resize_server(server, flavor_id)
+ server = self._confirm_resize(server)
+ # the dest should have the allocation for the server
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # the source host should be freed
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute1_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ def test_heal_partial_allocations_during_resize_change_dev_type(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF (slot 0) with 1 type-VF
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Resize the instance to consume a PF and two PCI devs instead
+
+ # start a compute with enough devices for the resize
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slots 0 and 1)
+ # * one type-PF (slot 2) with 1 type-VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=1, num_vfs=1)
+ # the config matches the PCI devs and the PF but not the VFs
+ compute2_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:*",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute2_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=3, free=3)
+ compute2_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ # resize the server to consume a PF and two PCI devs instead
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2,a-pf:1"}
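+ # the alias request format is "<alias name>:<count>"; multiple aliases
+ # are comma separated, so this requests two devices via the a-pci-dev
+ # alias and one device via the a-pf alias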
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+ server = self._confirm_resize(server)
+
+ # on the dest we have the new PCI allocations
+ self.assertPCIDeviceCounts("compute2", total=3, free=0)
+ compute2_expected_placement_view["usages"] = (
+ {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ }
+ )
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ # on the source the allocation is freed up
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute1_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ def test_heal_allocation_during_same_host_resize(self):
+ self.flags(allow_resize_to_same_host=True)
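+ # resizing to the same host is disabled by default, so it has to be
+ # enabled explicitly for this test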
+ # The fake libvirt will emulate on the host:
+ # * one type-PF (slot 0) with 3 type-VFs
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=3)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=3, free=3)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 3},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # Create an instance consuming one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=3, free=2)
+ # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # resize the server to consume 2 VFs on the same host
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+ # during resize both the source and the dest allocations are kept,
+ # and in a same host resize that means both are consumed from the
+ # same host
+ self.assertPCIDeviceCounts("compute1", total=3, free=0)
+ # the source side of the allocation is now held by the migration
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"], server['id'])
+ # NOTE(gibi): we intentionally don't heal allocation for the instance
+ # while it is being resized. See the comment in the
+ # pci_placement_translator about the reasoning.
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # revert the resize
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=3, free=2)
+ # the original allocations are restored
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # now resize and then confirm it
+ self._resize_server(server, flavor_id)
+ self._confirm_resize(server)
+
+ # we expect that the consumption is according to the new flavor
+ self.assertPCIDeviceCounts("compute1", total=3, free=1)
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 2
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 2}
+ }
+ # NOTE(gibi): This is unfortunate but during a same host resize
+ # confirm, when the PCI scheduling is not enabled, the healing logic
+ # cannot heal the dest host allocation during the claim. It will only
+ # heal it in the next run of the ResourceTracker._update(). This is
+ # due to the fact that ResourceTracker.drop_move_claim runs both for
+ # revert (on the dest) and confirm (on the source) and in a same host
+ # resize this means that it runs on both the source and the dest as
+ # they are the same.
+ # Anyhow the healing will happen just a bit later. And the end goal is
+ # to enable the scheduler support by default and delete the whole
+ # healing logic. So I think this is acceptable.
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+
+@ddt.ddt
+class SimpleRCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "gpu",
+ "traits": ",".join(
+ [
+ os_traits.HW_GPU_API_VULKAN,
+ "purple",
+ "round",
+ ]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
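+ # nova normalizes the custom resource class and traits to their
+ # placement form: "gpu" becomes CUSTOM_GPU and "purple" / "round"
+ # become CUSTOM_PURPLE / CUSTOM_ROUND, while the standard
+ # HW_GPU_API_VULKAN trait is kept as is (see the expected view below)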
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_GPU": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_PURPLE",
+ "CUSTOM_ROUND",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_GPU": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view)
+
+ @ddt.data(
+ {
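+ # without a resource_class the alias falls back to the default
+ # CUSTOM_PCI_<vendor>_<product> RC, but the device spec above
+ # assigned CUSTOM_GPU to the device, so nothing matches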
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-gpu-wrong-rc",
+ },
+ {
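+ # the standard PGPU RC does not match the CUSTOM_GPU RC the
+ # device reports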
+ "resource_class": os_resource_classes.PGPU,
+ "name": "a-gpu-wrong-rc-2",
+ },
+ {
+ "resource_class": "GPU",
+ # NOTE(gibi): "big" is missing from the device spec
+ "traits": "purple,big",
+ "name": "a-gpu-missing-trait",
+ },
+ )
+ def test_boot_with_custom_rc_and_traits_no_matching_device(
+ self, pci_alias
+ ):
+ self.flags(group="pci", alias=self._to_list_of_json_str([pci_alias]))
+ extra_spec = {"pci_passthrough:alias": f"{pci_alias['name']}:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state="ERROR"
+ )
+ self.assertIn("fault", server)
+ self.assertIn("No valid host", server["fault"]["message"])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+
+ def test_boot_with_custom_rc_and_traits_succeeds(self):
+ pci_alias_gpu = {
+ "resource_class": "GPU",
+ "traits": "HW_GPU_API_VULKAN,PURPLE",
+ "name": "a-gpu",
+ }
+ self.flags(
+ group="pci", alias=self._to_list_of_json_str([pci_alias_gpu])
+ )
+
+ extra_spec = {"pci_passthrough:alias": "a-gpu:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ self.compute1_expected_placement_view["usages"]["0000:81:00.0"][
+ "CUSTOM_GPU"
+ ] = 1
+ self.compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {"CUSTOM_GPU": 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+ self.assert_no_pci_healing("compute1")
+
+
+class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_device_claim_consistent_with_placement_allocation(self):
+ """As soon as [filter_scheduler]pci_in_placement is enabled the
+ nova-scheduler will allocate PCI devices in placement. Then on the
+ nova-compute side the PCI claim will also allocate PCI devices in the
+ nova DB. This test creates a situation where the two allocations
+ could contradict each other and observes that, in such a situation,
+ the PCI claim fails instead of allocating a device that is not
+ allocated in placement.
+
+ For the contradiction to happen we need two PCI devices that look
+ different from the placement perspective than from the nova DB
+ perspective.
+
+ We can do that by assigning different traits in placement and having
+ different product_ids in the nova DB. Then we create a request that,
+ from the placement perspective, matches only one of the devices, and,
+ from the nova DB perspective, matches only the other device. Then we
+ expect the boot request to fail with no valid host.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ # * one type-PF in slot 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=0)
+ # we allow both devices to be consumed, but we assign different traits
+ # so we can selectively schedule to one of the devices in placement
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PCI",
+ },
+ {
+ "address": "0000:81:01.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PF",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 1},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_A_PCI",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_A_PF",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 0},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # now we create a PCI alias that cannot be fulfilled from both the
+ # nova and the placement perspective at the same time, but can be
+ # fulfilled from each perspective individually
+ pci_alias_no_match = {
+ "resource_class": "MY_DEV",
+ # by product_id this matches 81.00 only
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ # by trait this matches 81.01 only
+ "traits": "A_PF",
+ "name": "a-pci",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_no_match]),
+ )
+
+ # then try to boot with the alias and expect no valid host error
+ extra_spec = {"pci_passthrough:alias": "a-pci:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_vf_with_split_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # reserve VFs from 81.01 in placement to drive the first instance to
+ # 81.00
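+ # (_reserve_placement_resource sets the reserved amount of the given
+ # RC in the RP's placement inventory so those units cannot be
+ # allocated)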
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 2)
+ # boot an instance with a single VF
+ # we expect that it is allocated from 81.00 as both VFs on 81.01 are
+ # reserved
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_1vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=3)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1}
+ }
+ compute1_expected_placement_view["allocations"][server_1vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ # Boot a second instance requesting two VFs and ensure that the only
+ # way that placement allows this is to split the two VFs between PFs.
+ # Let's reduce the reservation on 81.01 from two to one so the only
+ # viable placement candidate is: one VF from 81.00 and one VF from
+ # 81.01
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 1)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ # both VMs use one VF from 81.00
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ compute1_expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_3vfs_asymmetric_split_between_pfs(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # Boot an instance requesting three VFs. The 3 VFs can be split between
+ # the two PFs two ways: 2 from 81.00 and 1 from 81.01, or 1 from 81.00
+ # and 2 from 81.01.
+ # Let's block the first way in placement by reserving 1 device from
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:00.0", "CUSTOM_MY_VF", 1)
+ extra_spec = {"pci_passthrough:alias": "a-vf:3"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # We expect this to fit.
+ server_3vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ compute1_expected_placement_view["allocations"][server_3vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index 2aa95a3016..098a0e857b 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -14,12 +14,14 @@
# under the License.
import copy
+import pprint
+import typing as ty
+from unittest import mock
from urllib import parse as urlparse
import ddt
import fixtures
from lxml import etree
-import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -27,10 +29,13 @@ from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
import nova
+from nova.compute import pci_placement_translator
from nova import context
+from nova import exception
from nova.network import constants
from nova import objects
from nova.objects import fields
+from nova.pci.utils import parse_address
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.functional.api import client
@@ -40,15 +45,65 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+class PciPlacementHealingFixture(fixtures.Fixture):
+ """Allow asserting if the pci_placement_translator module needed to
+ heal PCI allocations. Such healing is only normal during upgrade. After
+ every compute is upgraded and the scheduling support of PCI tracking in
+ placement is enabled there should be no need to heal PCI allocations in
+ the resource tracker. We assert this as we eventually want to remove the
+ automatic healing logic from the resource tracker.
+ """
+
+ def __init__(self):
+ super().__init__()
+ # a list of (nodename, result, allocation_before, allocation_after)
+ # tuples recording the result of the calls to
+ # update_provider_tree_for_pci
+ self.calls = []
+
+ def setUp(self):
+ super().setUp()
+
+ orig = pci_placement_translator.update_provider_tree_for_pci
+
+ def wrapped_update(
+ provider_tree, nodename, pci_tracker, allocations, same_host
+ ):
+ alloc_before = copy.deepcopy(allocations)
+ updated = orig(
+ provider_tree, nodename, pci_tracker, allocations, same_host)
+ alloc_after = copy.deepcopy(allocations)
+ self.calls.append((nodename, updated, alloc_before, alloc_after))
+ return updated
+
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ "nova.compute.pci_placement_translator."
+ "update_provider_tree_for_pci",
+ wrapped_update,
+ )
+ )
+
+ def last_healing(self, hostname: str) -> ty.Optional[ty.Tuple[dict, dict]]:
+ for h, updated, before, after in self.calls:
+ if h == hostname and updated:
+ return before, after
+ return None
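+ # typical use, see assert_no_pci_healing() in _PCIServersTestBase
+ # below: after a server lifecycle operation, last_healing(hostname)
+ # returning None proves that the resource tracker did not need to
+ # heal any PCI allocation on that host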
+
+
class _PCIServersTestBase(base.ServersTestBase):
ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+
def setUp(self):
self.ctxt = context.get_admin_context()
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=self.PCI_ALIAS,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
super(_PCIServersTestBase, self).setUp()
@@ -62,6 +117,9 @@ class _PCIServersTestBase(base.ServersTestBase):
'.PciPassthroughFilter.host_passes',
side_effect=host_pass_mock)).mock
+ self.pci_healing_fixture = self.useFixture(
+ PciPlacementHealingFixture())
+
def assertPCIDeviceCounts(self, hostname, total, free):
"""Ensure $hostname has $total devices, $free of which are free."""
devices = objects.PciDeviceList.get_by_compute_node(
@@ -71,8 +129,218 @@ class _PCIServersTestBase(base.ServersTestBase):
self.assertEqual(total, len(devices))
self.assertEqual(free, len([d for d in devices if d.is_available()]))
+ def assert_no_pci_healing(self, hostname):
+ last_healing = self.pci_healing_fixture.last_healing(hostname)
+ before = last_healing[0] if last_healing else None
+ after = last_healing[1] if last_healing else None
+ self.assertIsNone(
+ last_healing,
+ "The resource tracker needed to heal PCI allocation in placement "
+ "on host %s. This should not happen in normal operation as the "
+ "scheduler should create the proper allocation instead.\n"
+ "Allocations before healing:\n %s\n"
+ "Allocations after healing:\n %s\n"
+ % (
+ hostname,
+ pprint.pformat(before),
+ pprint.pformat(after),
+ ),
+ )
+
+ def _get_rp_by_name(self, name, rps):
+ for rp in rps:
+ if rp["name"] == name:
+ return rp
+ self.fail(f'RP {name} is not found in Placement {rps}')
+
+ def assert_placement_pci_inventory(self, hostname, inventories, traits):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ # rps also contains the root provider so we subtract 1
+ self.assertEqual(
+ len(inventories),
+ len(rps) - 1,
+ f"Number of RPs on {hostname} doesn't match. "
+ f"Expected {list(inventories)} actual {[rp['name'] for rp in rps]}"
+ )
+
+ for rp_name, inv in inventories.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ rp_inv = self._get_provider_inventory(rp['uuid'])
+
+ self.assertEqual(
+ len(inv),
+ len(rp_inv),
+ f"Number of inventories on {real_rp_name} is not as "
+ f"expected. Expected {inv}, actual {rp_inv}"
+ )
+ for rc, total in inv.items():
+ self.assertEqual(
+ total,
+ rp_inv[rc]["total"])
+ self.assertEqual(
+ total,
+ rp_inv[rc]["max_unit"])
+
+ rp_traits = self._get_provider_traits(rp['uuid'])
+ self.assertEqual(
+ # COMPUTE_MANAGED_PCI_DEVICE is automatically reported on
+ # PCI device RPs by nova
+ set(traits[rp_name]) | {"COMPUTE_MANAGED_PCI_DEVICE"},
+ set(rp_traits),
+ f"Traits on RP {real_rp_name} do not match the expectation"
+ )
-class SRIOVServersTest(_PCIServersTestBase):
+ def assert_placement_pci_usages(self, hostname, usages):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ for rp_name, usage in usages.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ rp_usage = self._get_provider_usages(rp['uuid'])
+ self.assertEqual(
+ usage,
+ rp_usage,
+ f"Usage on RP {real_rp_name} does not match the expectation"
+ )
+
+ def assert_placement_pci_allocations(self, allocations):
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ len(actual_allocations),
+ f"The consumer {consumer} allocates from a different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ self.assertIn(
+ rp_uuid,
+ actual_allocations,
+ f"The consumer {consumer} is expected to allocate from "
+ f"{rp_name}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp_uuid]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+ f"The consumer {consumer} is expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_allocations_on_host(self, hostname, allocations):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ # actual_allocations also contains allocations against the
+ # root provider for VCPU, MEMORY_MB, and DISK_GB so subtract
+ # one
+ len(actual_allocations) - 1,
+ f"The consumer {consumer} allocates from a different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ self.assertIn(
+ rp['uuid'],
+ actual_allocations,
+ f"The consumer {consumer} is expected to allocate from "
+ f"{rp['uuid']}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp['uuid']]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+ f"The consumer {consumer} is expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_view(
+ self, hostname, inventories, traits, usages=None, allocations=None
+ ):
+ if not usages:
+ usages = {}
+
+ if not allocations:
+ allocations = {}
+
+ self.assert_placement_pci_inventory(hostname, inventories, traits)
+ self.assert_placement_pci_usages(hostname, usages)
+ self.assert_placement_pci_allocations_on_host(hostname, allocations)
+
+ @staticmethod
+ def _to_list_of_json_str(lst):
+ return [jsonutils.dumps(x) for x in lst]
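+ # e.g. _to_list_of_json_str([{"address": "0000:81:00.0"}]) returns
+ # ['{"address": "0000:81:00.0"}'] as the [pci]device_spec and
+ # [pci]alias options expect a list of JSON string entries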
+
+ @staticmethod
+ def _move_allocation(allocations, from_uuid, to_uuid):
+ allocations[to_uuid] = allocations[from_uuid]
+ del allocations[from_uuid]
+
+ def _move_server_allocation(self, allocations, server_uuid, revert=False):
+ migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
+ if revert:
+ self._move_allocation(allocations, migration_uuid, server_uuid)
+ else:
+ self._move_allocation(allocations, server_uuid, migration_uuid)
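+ # e.g. after _resize_server() the source side allocation is held by
+ # the migration UUID, so a test calls
+ # _move_server_allocation(expected_view["allocations"], server["id"])
+ # to mirror that in its expected placement view; revert=True moves the
+ # allocation back to the instance UUID after a _revert_resize()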
+
+
+class _PCIServersWithMigrationTestBase(_PCIServersTestBase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
+ self._migrate_stub))
+
+ def _migrate_stub(self, domain, destination, params, flags):
+ """Stub out migrateToURI3."""
+
+ src_hostname = domain._connection.hostname
+ dst_hostname = urlparse.urlparse(destination).netloc
+
+ # In a real live migration, libvirt and QEMU on the source and
+ # destination talk it out, resulting in the instance starting to exist
+ # on the destination. Fakelibvirt cannot do that, so we have to
+ # manually create the "incoming" instance on the destination
+ # fakelibvirt.
+ dst = self.computes[dst_hostname]
+ dst.driver._host.get_connection().createXML(
+ params['destination_xml'],
+ 'fake-createXML-doesnt-care-about-flags')
+
+ src = self.computes[src_hostname]
+ conn = src.driver._host.get_connection()
+
+ # because migrateToURI3 is spawned in a background thread, this method
+ # does not block the upper nova layers. Because we don't want nova to
+ # think the live migration has finished until this method is done, the
+ # last thing we do is make fakelibvirt's Domain.jobStats() return
+ # VIR_DOMAIN_JOB_COMPLETED.
+ server = etree.fromstring(
+ params['destination_xml']
+ ).find('./uuid').text
+ dom = conn.lookupByUUIDString(server)
+ dom.complete_job()
+
+
+class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# TODO(stephenfin): We're using this because we want to be able to force
# the host during scheduling. We should instead look at overriding policy
@@ -82,7 +350,7 @@ class SRIOVServersTest(_PCIServersTestBase):
VFS_ALIAS_NAME = 'vfs'
PFS_ALIAS_NAME = 'pfs'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -120,40 +388,6 @@ class SRIOVServersTest(_PCIServersTestBase):
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
- self._migrate_stub))
-
- def _migrate_stub(self, domain, destination, params, flags):
- """Stub out migrateToURI3."""
-
- src_hostname = domain._connection.hostname
- dst_hostname = urlparse.urlparse(destination).netloc
-
- # In a real live migration, libvirt and QEMU on the source and
- # destination talk it out, resulting in the instance starting to exist
- # on the destination. Fakelibvirt cannot do that, so we have to
- # manually create the "incoming" instance on the destination
- # fakelibvirt.
- dst = self.computes[dst_hostname]
- dst.driver._host.get_connection().createXML(
- params['destination_xml'],
- 'fake-createXML-doesnt-care-about-flags')
-
- src = self.computes[src_hostname]
- conn = src.driver._host.get_connection()
-
- # because migrateToURI3 is spawned in a background thread, this method
- # does not block the upper nova layers. Because we don't want nova to
- # think the live migration has finished until this method is done, the
- # last thing we do is make fakelibvirt's Domain.jobStats() return
- # VIR_DOMAIN_JOB_COMPLETED.
- server = etree.fromstring(
- params['destination_xml']
- ).find('./uuid').text
- dom = conn.lookupByUUIDString(server)
- dom.complete_job()
-
def _disable_sriov_in_pf(self, pci_info):
# Check for PF and change the capability from virt_functions
# Delete all the VFs
@@ -357,31 +591,66 @@ class SRIOVServersTest(_PCIServersTestBase):
expect_fail=False):
# The purpose here is to force an observable PCI slot update when
# moving from source to dest. This is accomplished by having a single
- # PCI device on the source, 2 PCI devices on the test, and relying on
- # the fact that our fake HostPCIDevicesInfo creates predictable PCI
- # addresses. The PCI device on source and the first PCI device on dest
- # will have identical PCI addresses. By sticking a "placeholder"
- # instance on that first PCI device on the dest, the incoming instance
- # from source will be forced to consume the second dest PCI device,
- # with a different PCI address.
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and
+ # relying on the fact that our fake HostPCIDevicesInfo creates
+ # predictable PCI addresses. The PCI VF device on source and the first
+ # PCI VF device on dest will have identical PCI addresses. By sticking
+ # a "placeholder" instance on that first PCI VF device on the dest, the
+ # incoming instance from source will be forced to consume the second
+ # dest PCI VF device, with a different PCI address.
+ # We want to test server operations with SRIOV VFs and SRIOV PFs, so
+ # the config of the compute hosts also has one extra PCI PF device
+ # without any VF children. But the two computes have different PCI PF
+ # addresses and MACs so that the test can observe the slot update as
+ # well as the MAC update during migration and after revert.
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+ bus=0x82, # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
self.start_compute(
hostname='source',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=1))
+ pci_info=source_pci_info)
+
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+ bus=0x82, # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
self.start_compute(
hostname='dest',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=2))
+ pci_info=dest_pci_info)
source_port = self.neutron.create_port(
{'port': self.neutron.network_4_port_1})
+ source_pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})
dest_port1 = self.neutron.create_port(
{'port': self.neutron.network_4_port_2})
dest_port2 = self.neutron.create_port(
{'port': self.neutron.network_4_port_3})
source_server = self._create_server(
- networks=[{'port': source_port['port']['id']}], host='source')
+ networks=[
+ {'port': source_port['port']['id']},
+ {'port': source_pf_port['port']['id']}
+ ],
+ host='source',
+ )
dest_server1 = self._create_server(
networks=[{'port': dest_port1['port']['id']}], host='dest')
dest_server2 = self._create_server(
@@ -389,6 +658,7 @@ class SRIOVServersTest(_PCIServersTestBase):
# Refresh the ports.
source_port = self.neutron.show_port(source_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
dest_port1 = self.neutron.show_port(dest_port1['port']['id'])
dest_port2 = self.neutron.show_port(dest_port2['port']['id'])
@@ -404,11 +674,24 @@ class SRIOVServersTest(_PCIServersTestBase):
same_slot_port = dest_port2
self._delete_server(dest_server1)
- # Before moving, explictly assert that the servers on source and dest
+ # Before moving, explicitly assert that the servers on source and dest
# have the same pci_slot in their port's binding profile
self.assertEqual(source_port['port']['binding:profile']['pci_slot'],
same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
# Before moving, assert that the servers on source and dest have the
# same PCI source address in their XML for their SRIOV nic.
source_conn = self.computes['source'].driver._host.get_connection()
@@ -425,14 +708,28 @@ class SRIOVServersTest(_PCIServersTestBase):
move_operation(source_server)
# Refresh the ports again, keeping in mind the source_port is now bound
- # on the dest after unshelving.
+ # on the dest after the move.
source_port = self.neutron.show_port(source_port['port']['id'])
same_slot_port = self.neutron.show_port(same_slot_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
self.assertNotEqual(
source_port['port']['binding:profile']['pci_slot'],
same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the dest host PF PCI device.
+ self.assertEqual(
+ '0000:82:06.0', # which is in sync with the dest host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the dest host
+ self.assertEqual(
+ 'b4:96:91:34:f4:bb',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
conn = self.computes['dest'].driver._host.get_connection()
vms = [vm._def for vm in conn._vms.values()]
self.assertEqual(2, len(vms))
@@ -460,6 +757,169 @@ class SRIOVServersTest(_PCIServersTestBase):
self._confirm_resize(source_server)
self._test_move_operation_with_neutron(move_operation)
+ def test_cold_migrate_and_revert_server_with_neutron(self):
+ # The purpose here is to force an observable PCI slot update when
+ # moving from source to dest and then from dest to source after the
+ # revert. This is accomplished by having a single
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and
+ # relying on the fact that our fake HostPCIDevicesInfo creates
+ # predictable PCI addresses. The PCI VF device on source and the first
+ # PCI VF device on dest will have identical PCI addresses. By sticking
+ # a "placeholder" instance on that first PCI VF device on the dest, the
+ # incoming instance from source will be forced to consume the second
+ # dest PCI VF device, with a different PCI address.
+ # We want to test server operations with SRIOV VFs and SRIOV PFs, so
+ # the config of the compute hosts also has one extra PCI PF device
+ # without any VF children. But the two computes have different PCI PF
+ # addresses and MACs so that the test can observe the slot update as
+ # well as the MAC update during migration and after revert.
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+ bus=0x82, # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
+ self.start_compute(
+ hostname='source',
+ pci_info=source_pci_info)
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+ bus=0x82, # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
+ self.start_compute(
+ hostname='dest',
+ pci_info=dest_pci_info)
+ source_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_1})
+ source_pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})
+ dest_port1 = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_2})
+ dest_port2 = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_3})
+ source_server = self._create_server(
+ networks=[
+ {'port': source_port['port']['id']},
+ {'port': source_pf_port['port']['id']}
+ ],
+ host='source',
+ )
+ dest_server1 = self._create_server(
+ networks=[{'port': dest_port1['port']['id']}], host='dest')
+ dest_server2 = self._create_server(
+ networks=[{'port': dest_port2['port']['id']}], host='dest')
+ # Refresh the ports.
+ source_port = self.neutron.show_port(source_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+ dest_port1 = self.neutron.show_port(dest_port1['port']['id'])
+ dest_port2 = self.neutron.show_port(dest_port2['port']['id'])
+ # Find the server on the dest compute that's using the same pci_slot as
+ # the server on the source compute, and delete the other one to make
+ # room for the incoming server from the source.
+ source_pci_slot = source_port['port']['binding:profile']['pci_slot']
+ dest_pci_slot1 = dest_port1['port']['binding:profile']['pci_slot']
+ if dest_pci_slot1 == source_pci_slot:
+ same_slot_port = dest_port1
+ self._delete_server(dest_server2)
+ else:
+ same_slot_port = dest_port2
+ self._delete_server(dest_server1)
+ # Before moving, explicitly assert that the servers on source and dest
+ # have the same pci_slot in their port's binding profile
+ self.assertEqual(source_port['port']['binding:profile']['pci_slot'],
+ same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+ # Before moving, assert that the servers on source and dest have the
+ # same PCI source address in their XML for their SRIOV nic.
+ source_conn = self.computes['source'].driver._host.get_connection()
+ dest_conn = self.computes['dest'].driver._host.get_connection()
+ source_vms = [vm._def for vm in source_conn._vms.values()]
+ dest_vms = [vm._def for vm in dest_conn._vms.values()]
+ self.assertEqual(1, len(source_vms))
+ self.assertEqual(1, len(dest_vms))
+ self.assertEqual(1, len(source_vms[0]['devices']['nics']))
+ self.assertEqual(1, len(dest_vms[0]['devices']['nics']))
+ self.assertEqual(source_vms[0]['devices']['nics'][0]['source'],
+ dest_vms[0]['devices']['nics'][0]['source'])
+
+ # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
+ # probably be less...dumb
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ self._migrate_server(source_server)
+
+ # Refresh the ports again, keeping in mind the ports are now bound
+ # on the dest after migrating.
+ source_port = self.neutron.show_port(source_port['port']['id'])
+ same_slot_port = self.neutron.show_port(same_slot_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+ self.assertNotEqual(
+ source_port['port']['binding:profile']['pci_slot'],
+ same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the dest host PF PCI device.
+ self.assertEqual(
+ '0000:82:06.0', # which is in sync with the dest host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the dest host
+ self.assertEqual(
+ 'b4:96:91:34:f4:bb',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+ conn = self.computes['dest'].driver._host.get_connection()
+ vms = [vm._def for vm in conn._vms.values()]
+ self.assertEqual(2, len(vms))
+ for vm in vms:
+ self.assertEqual(1, len(vm['devices']['nics']))
+ self.assertNotEqual(vms[0]['devices']['nics'][0]['source'],
+ vms[1]['devices']['nics'][0]['source'])
+
+ self._revert_resize(source_server)
+
+ # Refresh the ports again, keeping in mind the ports are now bound
+ # on the source as the migration is reverted
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
def test_evacuate_server_with_neutron(self):
def move_operation(source_server):
# Down the source compute to enable the evacuation
@@ -477,17 +937,44 @@ class SRIOVServersTest(_PCIServersTestBase):
"""
# start two compute services with differing PCI device inventory
- self.start_compute(
- hostname='test_compute0',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=2, num_vfs=8, numa_node=0))
- self.start_compute(
- hostname='test_compute1',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=2, numa_node=1))
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=4, numa_node=0)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+ bus=0x82, # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
+ self.start_compute(hostname='test_compute0', pci_info=source_pci_info)
- # create the port
- self.neutron.create_port({'port': self.neutron.network_4_port_1})
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=2, numa_node=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+ bus=0x82, # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ # numa node needs to be aligned with the other pci devices in this
+ # host as the instance needs to fit into a single host numa node
+ numa_node=1,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
+
+ self.start_compute(hostname='test_compute1', pci_info=dest_pci_info)
+
+ # create the ports
+ port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_1})['port']
+ pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})['port']
# create a server using the VF via neutron
extra_spec = {'hw:cpu_policy': 'dedicated'}
@@ -495,7 +982,8 @@ class SRIOVServersTest(_PCIServersTestBase):
server = self._create_server(
flavor_id=flavor_id,
networks=[
- {'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
+ {'port': port['id']},
+ {'port': pf_port['id']},
],
host='test_compute0',
)
@@ -503,8 +991,8 @@ class SRIOVServersTest(_PCIServersTestBase):
# our source host should have marked two PCI devices as used, the VF
# and the parent PF, while the future destination is currently unused
self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
- self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
- self.assertPCIDeviceCounts('test_compute1', total=3, free=3)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=3)
+ self.assertPCIDeviceCounts('test_compute1', total=4, free=4)
# the instance should be on host NUMA node 0, since that's where our
# PCI devices are
@@ -527,19 +1015,32 @@ class SRIOVServersTest(_PCIServersTestBase):
# TODO(stephenfin): Stop relying on a side-effect of how nova
# chooses from multiple PCI devices (apparently the last
# matching one)
- 'pci_slot': '0000:81:01.4',
+ 'pci_slot': '0000:81:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
)
+ # ensure the binding details sent to "neutron" are correct
+ pf_port = self.neutron.show_port(pf_port['id'])['port']
+ self.assertIn('binding:profile', pf_port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '8086:1528',
+ 'pci_slot': '0000:82:00.0',
+ 'physical_network': 'physnet4',
+ 'device_mac_address': 'b4:96:91:34:f4:aa',
+ },
+ pf_port['binding:profile'],
+ )
+
# now live migrate that server
self._live_migrate(server, 'completed')
# we should now have transitioned our usage to the destination, freeing
# up the source in the process
- self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
- self.assertPCIDeviceCounts('test_compute1', total=3, free=1)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=6)
+ self.assertPCIDeviceCounts('test_compute1', total=4, free=1)
# the instance should now be on host NUMA node 1, since that's where
# our PCI devices are for this second host
@@ -564,6 +1065,18 @@ class SRIOVServersTest(_PCIServersTestBase):
},
port['binding:profile'],
)
+ # ensure the binding details sent to "neutron" are correct
+ pf_port = self.neutron.show_port(pf_port['id'])['port']
+ self.assertIn('binding:profile', pf_port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '8086:1528',
+ 'pci_slot': '0000:82:06.0',
+ 'physical_network': 'physnet4',
+ 'device_mac_address': 'b4:96:91:34:f4:bb',
+ },
+ pf_port['binding:profile'],
+ )
def test_get_server_diagnostics_server_with_VF(self):
"""Ensure server diagnostics include info on VF-type PCI devices."""
@@ -622,11 +1135,8 @@ class SRIOVServersTest(_PCIServersTestBase):
# Disable SRIOV capabilties in PF and delete the VFs
self._disable_sriov_in_pf(pci_info_no_sriov)
- fake_connection = self._get_connection(pci_info=pci_info_no_sriov,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute('test_compute0', pci_info=pci_info_no_sriov)
+ self.compute = self.computes['test_compute0']
ctxt = context.get_admin_context()
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -638,13 +1148,9 @@ class SRIOVServersTest(_PCIServersTestBase):
self.assertEqual(1, len(pci_devices))
self.assertEqual('type-PCI', pci_devices[0].dev_type)
- # Update connection with original pci info with sriov PFs
- fake_connection = self._get_connection(pci_info=pci_info,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- # Restart the compute service
- self.restart_compute_service(self.compute)
+ # Restart the compute service with sriov PFs
+ self.restart_compute_service(
+ self.compute.host, pci_info=pci_info, keep_hypervisor_state=False)
# Verify if PCI devices are of type type-PF or type-VF
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -666,12 +1172,92 @@ class SRIOVServersTest(_PCIServersTestBase):
],
)
+ def test_change_bound_port_vnic_type_kills_compute_at_restart(self):
+ """Create a server with a direct port and change the vnic_type of the
+ bound port to macvtap. Then restart the compute service.
+
+ As the vnic_type is changed on the port but the vif_type is hw_veb
+ instead of macvtap, the vif plug logic will try to look up the netdev
+ of the parent VF. However, that VF is consumed by the instance so the
+ netdev does not exist. This causes the compute service to fail with an
+ exception during startup.
+ """
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ self.start_compute(pci_info=pci_info)
+
+ # create a direct port
+ port = self.neutron.network_4_port_1
+ self.neutron.create_port({'port': port})
+
+ # create a server using the VF via neutron
+ server = self._create_server(networks=[{'port': port['id']}])
+
+ # update the vnic_type of the port in neutron
+ port = copy.deepcopy(port)
+ port['binding:vnic_type'] = 'macvtap'
+ self.neutron.update_port(port['id'], {"port": port})
+
+ compute = self.computes['compute1']
+
+ # Force an update on the instance info cache to ensure nova gets the
+ # information about the updated port
+ with context.target_cell(
+ context.get_admin_context(),
+ self.host_mappings['compute1'].cell_mapping
+ ) as cctxt:
+ compute.manager._heal_instance_info_cache(cctxt)
+ self.assertIn(
+ 'The vnic_type of the bound port %s has been changed in '
+ 'neutron from "direct" to "macvtap". Changing vnic_type of a '
+ 'bound port is not supported by Nova. To avoid breaking the '
+ 'connectivity of the instance please change the port '
+ 'vnic_type back to "direct".' % port['id'],
+ self.stdlog.logger.output,
+ )
+
+ def fake_get_ifname_by_pci_address(pci_addr: str, pf_interface=False):
+ # we want to fail the netdev lookup only if the pci_address is
+ # already consumed by our instance. So we look into the instance
+ # definition to see if the device is attached to the instance as a VF
+ conn = compute.manager.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(server['id'])
+ dev = dom._def['devices']['nics'][0]
+ lookup_addr = pci_addr.replace(':', '_').replace('.', '_')
+ if (
+ dev['type'] == 'hostdev' and
+ dev['source'] == 'pci_' + lookup_addr
+ ):
+ # nova tried to look up the netdev of an already consumed VF.
+ # So we have to fail
+ raise exception.PciDeviceNotFoundById(id=pci_addr)
+
+ # We need to simulate the actual failure manually as in our functional
+ # environment all the PCI lookups are mocked. In reality nova tries to
+ # look up the netdev of the pci device on the host used by the port as
+ # the parent of the macvtap. However, as the originally direct port is
+ # bound to the instance, the VF pci device is already consumed by the
+ # instance and therefore there is no netdev for the VF.
+ self.libvirt.mock_get_ifname_by_pci_address.side_effect = (
+ fake_get_ifname_by_pci_address
+ )
+ # Nova cannot prevent the vnic_type change on a bound port. Neutron
+ # should prevent that instead. But the nova-compute should still
+ # be able to start up and only log an ERROR for this instance being
+ # in an inconsistent state.
+ self.restart_compute_service('compute1')
+ self.assertIn(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+ 'support such change.',
+ self.stdlog.logger.output,
+ )
+
class SRIOVAttachDetachTest(_PCIServersTestBase):
# no need for aliases as these tests will request SRIOV via neutron
PCI_ALIAS = []
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -729,10 +1315,9 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2)
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
- fake_connection = self._get_connection(host_info, pci_info)
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute(
+ 'test_compute0', host_info=host_info, pci_info=pci_info)
+ self.compute = self.computes['test_compute0']
# Create server with a port
server = self._create_server(networks=[{'port': first_port_id}])
@@ -782,7 +1367,7 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
self.neutron.sriov_pf_port2['id'])
-class VDPAServersTest(_PCIServersTestBase):
+class VDPAServersTest(_PCIServersWithMigrationTestBase):
# this is needed for os_compute_api:os-migrate-server:migrate policy
ADMIN_API = True
@@ -791,7 +1376,7 @@ class VDPAServersTest(_PCIServersTestBase):
# Whitelist both the PF and VF; in reality, you probably wouldn't do this
# but we want to make sure that the PF is correctly taken off the table
# once any VF is used
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': '15b3',
'product_id': '101d',
@@ -814,14 +1399,13 @@ class VDPAServersTest(_PCIServersTestBase):
def setUp(self):
super().setUp()
-
# The ultimate base class _IntegratedTestBase uses NeutronFixture but
# we need a bit more intelligent neutron for these tests. Applying the
# new fixture here means that we re-stub what the previous neutron
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
- def start_compute(self):
+ def start_vdpa_compute(self, hostname='compute-0'):
vf_ratio = self.NUM_VFS // self.NUM_PFS
pci_info = fakelibvirt.HostPCIDevicesInfo(
@@ -859,7 +1443,7 @@ class VDPAServersTest(_PCIServersTestBase):
driver_name='mlx5_core')
vdpa_info.add_device(f'vdpa_vdpa{idx}', idx, vf)
- return super().start_compute(
+ return super().start_compute(hostname=hostname,
pci_info=pci_info, vdpa_info=vdpa_info,
libvirt_version=self.FAKE_LIBVIRT_VERSION,
qemu_version=self.FAKE_QEMU_VERSION)
@@ -900,7 +1484,6 @@ class VDPAServersTest(_PCIServersTestBase):
expected = """
<interface type="vdpa">
<mac address="b5:bc:2e:e7:51:ee"/>
- <model type="virtio"/>
<source dev="/dev/vhost-vdpa-3"/>
</interface>"""
actual = etree.tostring(elem, encoding='unicode')
@@ -914,7 +1497,7 @@ class VDPAServersTest(_PCIServersTestBase):
fake_create,
)
- hostname = self.start_compute()
+ hostname = self.start_vdpa_compute()
num_pci = self.NUM_PFS + self.NUM_VFS
# both the PF and VF with vDPA capabilities (dev_type=vdpa) should have
@@ -947,12 +1530,16 @@ class VDPAServersTest(_PCIServersTestBase):
port['binding:profile'],
)
- def _test_common(self, op, *args, **kwargs):
- self.start_compute()
-
+ def _create_port_and_server(self):
# create the port and a server, with the port attached to the server
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks=[{'port': vdpa_port['id']}])
+ return vdpa_port, server
+
+ def _test_common(self, op, *args, **kwargs):
+ self.start_vdpa_compute()
+
+ vdpa_port, server = self._create_port_and_server()
# attempt the unsupported action and ensure it fails
ex = self.assertRaises(
@@ -962,40 +1549,393 @@ class VDPAServersTest(_PCIServersTestBase):
'not supported for instance with vDPA ports',
ex.response.text)
- def test_attach_interface(self):
- self.start_compute()
+ # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+ # need to support this test
+ def test_attach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
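+ # (Illustrative note: pinning get_minimum_version_all_cells to 61
+ # simulates a deployment whose oldest compute service is assumed to
+ # predate vDPA hot-plug support, so the attach is still expected to
+ # be rejected.)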
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=61
+ ):
+ self._test_common(self._attach_interface, uuids.vdpa_port)
+ def test_attach_interface(self):
+ hostname = self.start_vdpa_compute()
# create the port and a server, but don't attach the port to the server
# yet
- vdpa_port = self.create_vdpa_port()
server = self._create_server(networks='none')
-
+ vdpa_port = self.create_vdpa_port()
# attempt to attach the port to the server
- ex = self.assertRaises(
- client.OpenStackApiException,
- self._attach_interface, server, vdpa_port['id'])
- self.assertIn(
- 'not supported for instance with vDPA ports',
- ex.response.text)
+ self._attach_interface(server, vdpa_port['id'])
+ # ensure the binding details sent to "neutron" were correct
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:06:00.4',
+ 'physical_network': 'physnet4',
+ },
+ port['binding:profile'],
+ )
+ self.assertEqual(hostname, port['binding:host_id'])
+ self.assertEqual(server['id'], port['device_id'])
+
+ # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+ # need to support this test
+ def test_detach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=61
+ ):
+ self._test_common(self._detach_interface, uuids.vdpa_port)
def test_detach_interface(self):
- self._test_common(self._detach_interface, uuids.vdpa_port)
+ self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # ensure the port is owned by the vm before the detach
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self._detach_interface(server, vdpa_port['id'])
+ # ensure the port is no longer owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual('', port['device_id'])
+ self.assertEqual({}, port['binding:profile'])
- def test_shelve(self):
- self._test_common(self._shelve_server)
+ def test_shelve_offload(self):
+ hostname = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # assert the port is bound to the vm and the compute host
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self.assertEqual(hostname, port['binding:host_id'])
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ # -2 as we claim the vdpa device, which makes the parent PF unavailable
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ server = self._shelve_server(server)
+ # now that the vm is shelve offloaded the port should not be bound
+ # to any host but should still be owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self.assertIsNone(port['binding:host_id'])
+ self.assertIn('binding:profile', port)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
- def test_suspend(self):
- self._test_common(self._suspend_server)
+ def test_unshelve_to_same_host(self):
+ hostname = self.start_vdpa_compute()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIsNone(port['binding:host_id'])
+
+ server = self._unshelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ def test_unshelve_to_different_host(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIsNone(port['binding:host_id'])
+
+ # force the unshelve to the other host
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._unshelve_server(server)
+ # the dest devices should be claimed
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ # and the source host devices should still be free
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
def test_evacute(self):
- self._test_common(self._evacuate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
- def test_resize(self):
- flavor_id = self._create_flavor()
- self._test_common(self._resize_server, flavor_id)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ # stop the source compute and enable the dest
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.computes['source'].stop()
+ # Down the source compute to enable the evacuation
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'forced_down': True})
+
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._evacuate_server(server)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
+
+ # as the source compute is offline the pci claims will not be cleaned
+ # up on the source compute.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # but if you fix/restart the source node the allocations for evacuated
+ # instances should be released.
+ self.restart_compute_service(source)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+
+ def test_resize_same_host(self):
+ self.flags(allow_resize_to_same_host=True)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ source = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # before the resize the vm should be using 1 VF, but that also marks
+ # the parent PF as unavailable, so we assert that 2 devices are in use.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ # while the resize is awaiting confirmation the VF claims are doubled
+ # even for a same host resize, so assert that 3 devices are in use:
+ # 1 PF and 2 VFs.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 3)
+ server = self._confirm_resize(server)
+ # but once we confirm, it should be reduced back to 1 PF and 1 VF
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # assert the hostname has not changed as part
+ # of the resize.
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_different_host(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_revert(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ # in resize verify both the dest and source pci claims should be
+ # present.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._revert_resize(server)
+ # but once we revert, the dest claims should be freed.
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
def test_cold_migrate(self):
- self._test_common(self._migrate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # enable the dest; we do not need to disable the source since cold
+ # migration won't happen to the same host in the libvirt driver
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._migrate_server(server)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+ # need to support this test
+ def test_suspend_and_resume_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=62
+ ):
+ self._test_common(self._suspend_server)
+
+ def test_suspend_and_resume(self):
+ source = self.start_vdpa_compute(hostname='source')
+ vdpa_port, server = self._create_port_and_server()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ server = self._suspend_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual('SUSPENDED', server['status'])
+ server = self._resume_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual('ACTIVE', server['status'])
+
+ # NOTE(sbauza): Now that we are past the Antelope release, we no longer
+ # need to support this test
+ def test_live_migrate_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=62
+ ):
+ self._test_common(self._live_migrate)
+
+ def test_live_migrate(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # enable the dest; we do not need to disable the source since live
+ # migration won't happen to the same host
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+
+ with mock.patch(
+ 'nova.virt.libvirt.LibvirtDriver.'
+ '_detach_direct_passthrough_vifs'
+ ):
+ server = self._live_migrate(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
class PCIServersTest(_PCIServersTestBase):
@@ -1004,7 +1944,7 @@ class PCIServersTest(_PCIServersTestBase):
microversion = 'latest'
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1018,9 +1958,15 @@ class PCIServersTest(_PCIServersTestBase):
}
)]
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
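+ # (The two flags above correspond to the following nova.conf
+ # settings, shown here for illustration:
+ #
+ #     [pci]
+ #     report_in_placement = True
+ #
+ #     [filter_scheduler]
+ #     pci_in_placement = True
+ # )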
+
def test_create_server_with_pci_dev_and_numa(self):
"""Verifies that an instance can be booted with cpu pinning and with an
- assigned pci device.
+ assigned pci device with legacy policy and numa info for the pci
+ device.
"""
self.flags(cpu_dedicated_set='0-7', group='compute')
@@ -1028,6 +1974,13 @@ class PCIServersTest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
self.start_compute(pci_info=pci_info)
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 0}},
+ )
+
# create a flavor
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1035,18 +1988,35 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(flavor_id=flavor_id, networks='none')
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 1}},
+ allocations={server['id']: {"0000:81:00.0": {self.PCI_RC: 1}}},
+ )
+ self.assert_no_pci_healing("compute1")
def test_create_server_with_pci_dev_and_numa_fails(self):
"""This test ensures that it is not possible to allocated CPU and
- memory resources from one NUMA node and a PCI device from another.
+ memory resources from one NUMA node and a PCI device from another
+ if we use the legacy policy and the pci device reports numa info.
"""
-
self.flags(cpu_dedicated_set='0-7', group='compute')
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {'hw:cpu_policy': 'dedicated'}
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
@@ -1058,6 +2028,10 @@ class PCIServersTest(_PCIServersTestBase):
self._create_server(
flavor_id=flavor_id, networks='none', expected_state='ERROR')
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
def test_live_migrate_server_with_pci(self):
"""Live migrate an instance with a PCI passthrough device.
@@ -1069,14 +2043,42 @@ class PCIServersTest(_PCIServersTestBase):
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(
hostname='test_compute1',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# create a server
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute0")
+
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now live migrate that server
ex = self.assertRaises(
@@ -1088,28 +2090,400 @@ class PCIServersTest(_PCIServersTestBase):
# this will bubble to the API
self.assertEqual(500, ex.response.status_code)
self.assertIn('NoValidHost', str(ex))
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_resize_pci_to_vanilla(self):
# Start two computes, one with PCI and one without.
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# Boot a server with a single PCI device.
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# Resize it to a flavor without PCI devices. We expect this to work, as
# test_compute1 is available.
- # FIXME(artom) This is bug 1941005.
flavor_id = self._create_flavor()
- ex = self.assertRaises(client.OpenStackApiException,
- self._resize_server, server, flavor_id)
- self.assertEqual(500, ex.response.status_code)
- self.assertIn('NoValidHost', str(ex))
- # self._confirm_resize(server)
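+ # (Illustrative note: migrate_disk_and_power_off normally returns
+ # JSON-serialized disk info; stubbing it to return '{}' lets the
+ # resize proceed without touching any real disks.)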
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_vanilla_to_pci(self):
+ """Resize an instance from a non PCI flavor to a PCI flavor"""
+ # Start two computes, one with PCI and one without.
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Boot a server without a PCI device and make sure it lands on the
+ # compute that has no device, so that we can later resize it to the
+ # other host, which has a PCI device.
+ extra_spec = {}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute1")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Resize it to a flavor with a PCI device. We expect this to work, as
+ # test_compute0 is available and has a PCI device.
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_from_one_dev_to_two(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=2),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize the server to a flavor requesting two devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the source host the PCI allocation is now held by the migration
+ self._move_server_allocation(
+ test_compute0_placement_pci_view['allocations'], server['id'])
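+ # (A minimal sketch, assuming _move_server_allocation simply re-keys
+ # the allocation dict from the instance UUID to the migration UUID:
+ #
+ #     allocations[migration_uuid] = allocations.pop(server['id'])
+ #
+ # and does the reverse when called with revert=True; migration_uuid
+ # is a hypothetical local name here.)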
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # on the dest we now have two devices allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now revert the resize
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the source host the allocation should move back to the instance
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # so the dest should be freed
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ del test_compute1_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now resize again and confirm it
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ # the source host should now be freed up
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # and dest allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_same_host_resize_with_pci(self):
+ """Start a single compute with 3 PCI devs and resize and instance
+ from one dev to two devs
+ """
+ self.flags(allow_resize_to_same_host=True)
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # Boot a server with a single PCI device.
+ # To stabilize the test we reserve 81.01 and 81.02 in placement so
+ # we can be sure that the instance will use 81.00; otherwise the
+ # allocation would land randomly on 00, 01, or 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 1)
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # remove the reservations, so we can resize on the same host and
+ # consume 01 and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 0)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 0)
+
+ # Resize the server to use 2 PCI devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=0)
+ # the source host side of the allocation is now held by the migration
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server['id'])
+ # but the dest side of the allocation is now also held on the same host
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # revert the resize so the instance should go back to use a single
+ # device
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ # the migration allocation is moved back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ # and the "dest" side of the allocation is dropped
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize again but now confirm the same host resize and assert that
+ # only the new flavor usage remains
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {self.PCI_RC: 1}
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_no_pci_healing("test_compute0")
def _confirm_resize(self, server, host='host1'):
# NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
@@ -1124,7 +2498,6 @@ class PCIServersTest(_PCIServersTestBase):
self.flags(host=orig_host)
def test_cold_migrate_server_with_pci(self):
-
host_devices = {}
orig_create = nova.virt.libvirt.guest.Guest.create
@@ -1153,6 +2526,41 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
self.start_compute(hostname=hostname, pci_info=pci_info)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# boot an instance with a PCI device on each host
extra_spec = {
@@ -1160,8 +2568,16 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # force the allocation on test_compute0 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
server_a = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute0')
+ # force the allocation on test_compute1 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 1)
server_b = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute1')
@@ -1173,6 +2589,25 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
self.assertPCIDeviceCounts(hostname, total=2, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_b['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # remove the resource reservation from test_compute1 to be able to
+ # migrate server_a there
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 0)
+
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
@@ -1190,19 +2625,390 @@ class PCIServersTest(_PCIServersTestBase):
server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
)
self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
+ # on the source host the allocation is now held by the migration UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server_a['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ # server_a now has an allocation on test_compute1 on 81:01
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:01.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:01.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now, confirm the migration and check our counts once again
self._confirm_resize(server_a)
self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
+ # the source host now has no allocations as the migration allocation
+ # is removed by confirm resize
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_request_two_pci_but_host_has_one(self):
+ # simulate a single type-PCI device on the host
+ self.start_compute(pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('compute1', total=1, free=1)
+
+ alias = [jsonutils.dumps(x) for x in (
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': 'a1',
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': 'a2',
+ },
+ )]
+ self.flags(group='pci', alias=alias)
+ # request two PCI devices, each of which individually matches the
+ # single available device on the host
+ extra_spec = {'pci_passthrough:alias': 'a1:1,a2:1'}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # so we expect the boot to fail with a no valid host error as only
+ # one of the requested PCI devices can be allocated
+ server = self._create_server(
+ flavor_id=flavor_id, networks="none", expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+
+ def _create_two_computes(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ return (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def _create_two_computes_and_an_instance_on_the_first(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ return (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def test_evacuate(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # kill test_compute0 and evacuate the instance
+ self.computes['test_compute0'].stop()
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"forced_down": True},
+ )
+ self._evacuate_server(server)
+ # the source allocation should be kept as the source is dead, but the
+ # server now has allocations on both hosts since evacuation does not
+ # use migration allocations.
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assert_placement_pci_inventory(
+ "test_compute0",
+ test_compute0_placement_pci_view["inventories"],
+ test_compute0_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute0", test_compute0_placement_pci_view["usages"]
+ )
+ self.assert_placement_pci_allocations(
+ {
+ server['id']: {
+ "test_compute0": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute0_0000:81:00.0": {self.PCI_RC: 1},
+ "test_compute1": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute1_0000:81:00.0": {self.PCI_RC: 1},
+ },
+ }
+ )
+
+ # dest allocation should be created
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_inventory(
+ "test_compute1",
+ test_compute1_placement_pci_view["inventories"],
+ test_compute1_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute1", test_compute0_placement_pci_view["usages"]
+ )
+
+ # recover test_compute0 and check that it is cleaned
+ self.restart_compute_service('test_compute0')
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # and test_compute1 is not changed (except that the instance now has
+ # allocations only on this compute)
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_unshelve_after_offload(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # shelve offload the server
+ self._shelve_server(server)
+
+ # source allocation should be freed
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should not be touched
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # disable test_compute0 and unshelve the instance
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"status": "disabled"},
+ )
+ self._unshelve_server(server)
+
+ # test_compute0 should be unchanged
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should be allocated
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_reschedule(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # try to boot a VM with a single device but inject a fault on the first
+ # compute so that the VM is re-scheduled to the other one
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+
+ calls = []
+ orig_guest_create = (
+ nova.virt.libvirt.driver.LibvirtDriver._create_guest)
+
+ def fake_guest_create(*args, **kwargs):
+ if not calls:
+ calls.append(1)
+ raise fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ )
+ else:
+ return orig_guest_create(*args, **kwargs)
+
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._create_guest',
+ new=fake_guest_create
+ ):
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none')
+
+ compute_pci_view_map = {
+ 'test_compute0': test_compute0_placement_pci_view,
+ 'test_compute1': test_compute1_placement_pci_view,
+ }
+ allocated_compute = server['OS-EXT-SRV-ATTR:host']
+ not_allocated_compute = (
+ "test_compute0"
+ if allocated_compute == "test_compute1"
+ else "test_compute1"
+ )
+
+ allocated_pci_view = compute_pci_view_map.pop(
+ server['OS-EXT-SRV-ATTR:host'])
+ not_allocated_pci_view = list(compute_pci_view_map.values())[0]
+
+ self.assertPCIDeviceCounts(allocated_compute, total=1, free=0)
+ allocated_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ allocated_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(allocated_compute, **allocated_pci_view)
+
+ self.assertPCIDeviceCounts(not_allocated_compute, total=1, free=1)
+ self.assert_placement_pci_view(
+ not_allocated_compute, **not_allocated_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_multi_create(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ body = self._build_server(flavor_id=pci_flavor_id, networks='none')
+ body.update(
+ {
+ "min_count": "2",
+ }
+ )
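+ # (min_count in the boot body requests a multi-create; with
+ # min_count=2 and no max_count given, exactly two instances are
+ # created from this single request.)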
+ self.api.post_server({'server': body})
+
+ servers = self.api.get_servers(detail=False)
+ for server in servers:
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ self.assertEqual(2, len(servers))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ # we have no way to influence which instance takes which device, so
+ # we need to look at the nova DB to properly assert the placement
+ # allocation
+ devices = objects.PciDeviceList.get_by_compute_node(
+ self.ctxt,
+ objects.ComputeNode.get_by_nodename(self.ctxt, 'test_compute0').id,
+ )
+ for dev in devices:
+ if dev.instance_uuid:
+ test_compute0_placement_pci_view["usages"][
+ dev.address][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ dev.instance_uuid] = {dev.address: {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1219,6 +3025,11 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
)]
expected_state = 'ACTIVE'
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Validate behavior of 'preferred' PCI NUMA policy.
@@ -1231,6 +3042,20 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {
@@ -1239,13 +3064,26 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
self._create_server(flavor_id=flavor_id)
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# now boot one with a PCI device, which should succeed thanks to the
# use of the PCI policy
extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(
+ server_with_pci = self._create_server(
flavor_id=flavor_id, expected_state=self.expected_state)
+ if self.expected_state == 'ACTIVE':
+ compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ compute1_placement_pci_view["allocations"][
+ server_with_pci['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
@@ -1261,12 +3099,105 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
)]
expected_state = 'ERROR'
+ def setUp(self):
+ super().setUp()
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.is_physical_function', return_value=False
+ )
+ )
+
+ def test_create_server_with_pci_dev_and_numa_placement_conflict(self):
+ # fakelibvirt will simulate the devices:
+ # * one type-PCI in 81.00 on numa 0
+ # * one type-PCI in 81.01 on numa 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the device_spec will assign different traits to 81.00 than 81.01
+ # so the two devices become different from placement perspective
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": "green",
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:01.0",
+ "traits": "red",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # both numa 0 and numa 1 have 4 PCPUs
+ self.flags(cpu_dedicated_set='0-7', group='compute')
+ self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": ["CUSTOM_GREEN"],
+ "0000:81:01.0": ["CUSTOM_RED"],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
+ # boot one instance with no PCI device to "fill up" NUMA node 0
+ # so that free PCPUs remain only on numa 1 while PCI is on both nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ }
+ flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+
+ pci_alias = {
+ "resource_class": self.PCI_RC,
+ # this means only 81.00, which is on numa 0, will match in placement
+ "traits": "green",
+ "name": "pci-dev",
+ # this forces the scheduler to only accept a solution where the
+ # PCI device is on the same numa node as the pinned CPUs
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias]),
+ )
+
+ # Ask for dedicated CPUs, which can now only be fulfilled on numa 1,
+ # and for a PCI alias that can only be fulfilled on numa 0 due to the
+ # trait request. We expect this to make the scheduling fail.
+ extra_spec = {
+ "hw:cpu_policy": "dedicated",
+ "pci_passthrough:alias": "pci-dev:1",
+ }
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, expected_state="ERROR")
+
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
@ddt.ddt
class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1296,7 +3227,7 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
num_pci=1, numa_node=pci_numa_node)
self.start_compute(pci_info=pci_info)
- # request cpu pinning to create a numa toplogy and allow the test to
+ # request cpu pinning to create a numa topology and allow the test to
# force which numa node the vm would have to be pinned to.
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1371,9 +3302,11 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
}
)]
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=alias,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=alias,
+ group='pci'
+ )
self._test_policy(pci_numa_node, status, 'required')
@@ -1451,7 +3384,7 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -1507,7 +3440,7 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
num_pfs=1, num_vfs=2, numa_node=pci_numa_node)
self.start_compute(pci_info=pci_info)
- # request cpu pinning to create a numa toplogy and allow the test to
+ # request cpu pinning to create a numa topology and allow the test to
# force which numa node the vm would have to be pinned to.
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1589,9 +3522,11 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
}
)]
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=alias,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=alias,
+ group='pci'
+ )
self._test_policy(pci_numa_node, status, 'required')
@@ -1680,3 +3615,568 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
],
)
self.assertTrue(self.mock_filter.called)
+
+
+class RemoteManagedServersTest(_PCIServersWithMigrationTestBase):
+
+ ADMIN_API = True
+ microversion = 'latest'
+
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
+ # A PF with access to physnet4.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': 'a2dc',
+ 'physical_network': 'physnet4',
+ 'remote_managed': 'false',
+ },
+ # A VF with access to physnet4.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': '1021',
+ 'physical_network': 'physnet4',
+ 'remote_managed': 'true',
+ },
+ # A PF programmed to forward traffic to an overlay network.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': 'a2d6',
+ 'physical_network': None,
+ 'remote_managed': 'false',
+ },
+ # A VF programmed to forward traffic to an overlay network.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': '101e',
+ 'physical_network': None,
+ 'remote_managed': 'true',
+ },
+ )]
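+ # NOTE: devices tagged with remote_managed=true are only consumable
+ # through Neutron ports with vnic_type 'remote-managed' (see the port
+ # helpers below), not through flavor-based PCI aliases.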
+
+ PCI_ALIAS = []
+
+ NUM_PFS = 1
+ NUM_VFS = 4
+ vf_ratio = NUM_VFS // NUM_PFS
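+ # i.e. 4 VFs per PF for the fake devices added in start_compute()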
+
+ # Min Libvirt version that supports working with PCI VPD.
+ FAKE_LIBVIRT_VERSION = 7_009_000 # 7.9.0
+ FAKE_QEMU_VERSION = 5_001_000 # 5.1.0
+
+ def setUp(self):
+ super().setUp()
+ self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address',
+ new=mock.MagicMock(
+ side_effect=lambda addr: self._get_pci_function_number(addr))))
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_mac_by_pci_address',
+ new=mock.MagicMock(
+ side_effect=(
+ lambda addr: {
+ "0000:80:00.0": "52:54:00:1e:59:42",
+ "0000:81:00.0": "52:54:00:1e:59:01",
+ "0000:82:00.0": "52:54:00:1e:59:02",
+ }.get(addr)
+ )
+ )
+ ))
+
+ @classmethod
+ def _get_pci_function_number(cls, pci_addr: str):
+ """Get a VF function number based on a PCI address.
+
+ Assume that the PCI ARI capability is enabled (slot bits become a part
+ of a function number).
+ """
+ _, _, slot, function = parse_address(pci_addr)
+ # The number of PFs is subtracted to get a VF number.
+ return int(slot, 16) + int(function, 16) - cls.NUM_PFS
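+ # For example, with ARI enabled, VF '0000:81:00.4' parses to slot 0x00
+ # and function 0x4, so with NUM_PFS = 1 this returns 0 + 4 - 1 = 3,
+ # matching the 'vf_num': 3 asserted in the binding profiles below.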
+
+ def start_compute(
+ self, hostname='test_compute0', host_info=None, pci_info=None,
+ mdev_info=None, vdpa_info=None,
+ libvirt_version=None,
+ qemu_version=None):
+
+ if not pci_info:
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=0, num_vfs=0)
+
+ pci_info.add_device(
+ dev_type='PF',
+ bus=0x81,
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='a2dc',
+ prod_name='BlueField-3 integrated ConnectX-7 controller',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT43244 BlueField-3 integrated ConnectX-7',
+ 'readonly': {
+ 'serial_number': 'MT0000X00001',
+ },
+ }
+ )
+
+ for idx in range(self.NUM_VFS):
+ pci_info.add_device(
+ dev_type='VF',
+ bus=0x81,
+ slot=0x0,
+ function=idx + 1,
+ iommu_group=idx + 43,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ parent=(0x81, 0x0, 0),
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='1021',
+ prod_name='MT2910 Family [ConnectX-7]',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT2910 Family [ConnectX-7]',
+ 'readonly': {
+ 'serial_number': 'MT0000X00001',
+ },
+ }
+ )
+
+ pci_info.add_device(
+ dev_type='PF',
+ bus=0x82,
+ slot=0x0,
+ function=0,
+ iommu_group=84,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='a2d6',
+ prod_name='MT42822 BlueField-2 integrated ConnectX-6',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT42822 BlueField-2 integrated ConnectX-6',
+ 'readonly': {
+ 'serial_number': 'MT0000X00002',
+ },
+ }
+ )
+
+ for idx in range(self.NUM_VFS):
+ pci_info.add_device(
+ dev_type='VF',
+ bus=0x82,
+ slot=0x0,
+ function=idx + 1,
+ iommu_group=idx + 85,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ parent=(0x82, 0x0, 0),
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='101e',
+ prod_name='ConnectX Family mlx5Gen Virtual Function',
+ driver_name='mlx5_core')
+
+ return super().start_compute(
+ hostname=hostname, host_info=host_info, pci_info=pci_info,
+ mdev_info=mdev_info, vdpa_info=vdpa_info,
+ libvirt_version=libvirt_version or self.FAKE_LIBVIRT_VERSION,
+ qemu_version=qemu_version or self.FAKE_QEMU_VERSION)
+
+ def create_remote_managed_tunnel_port(self):
+ dpu_tunnel_port = {
+ 'id': uuids.dpu_tunnel_port,
+ 'network_id': self.neutron.network_3['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'fa:16:3e:f0:a4:bb',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.2.8',
+ 'subnet_id': self.neutron.subnet_3['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
+
+ self.neutron.create_port({'port': dpu_tunnel_port})
+ return dpu_tunnel_port
+
+ def create_remote_managed_physnet_port(self):
+ dpu_physnet_port = {
+ 'id': uuids.dpu_physnet_port,
+ 'network_id': self.neutron.network_4['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'd2:0b:fd:99:89:8b',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.10',
+ 'subnet_id': self.neutron.subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
+
+ self.neutron.create_port({'port': dpu_physnet_port})
+ return dpu_physnet_port
+
+ def test_create_server_physnet(self):
+ """Create an instance with a tunnel remote-managed port."""
+
+ hostname = self.start_compute()
+ num_pci = (self.NUM_PFS + self.NUM_VFS) * 2
+
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ dpu_port = self.create_remote_managed_physnet_port()
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertNotIn('binding:profile', port)
+
+ self._create_server(networks=[{'port': dpu_port['id']}])
+
+ # Ensure there is one less VF available and that the PF
+ # is no longer usable.
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+
+ # Ensure the binding:profile details sent to Neutron are correct after
+ # a port update.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({
+ 'card_serial_number': 'MT0000X00001',
+ 'pci_slot': '0000:81:00.4',
+ 'pci_vendor_info': '15b3:1021',
+ 'pf_mac_address': '52:54:00:1e:59:01',
+ 'physical_network': 'physnet4',
+ 'vf_num': 3
+ }, port['binding:profile'])
+
+ def test_create_server_tunnel(self):
+ """Create an instance with a tunnel remote-managed port."""
+
+ hostname = self.start_compute()
+ num_pci = (self.NUM_PFS + self.NUM_VFS) * 2
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertNotIn('binding:profile', port)
+
+ self._create_server(networks=[{'port': dpu_port['id']}])
+
+ # Ensure there is one less VF available and that the PF
+ # is no longer usable.
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+
+ # Ensure the binding:profile details sent to Neutron are correct after
+ # a port update.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({
+ 'card_serial_number': 'MT0000X00002',
+ 'pci_slot': '0000:82:00.4',
+ 'pci_vendor_info': '15b3:101e',
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'physical_network': None,
+ 'vf_num': 3
+ }, port['binding:profile'])
+
+ def _test_common(self, op, *args, **kwargs):
+ self.start_compute()
+ dpu_port = self.create_remote_managed_tunnel_port()
+ server = self._create_server(networks=[{'port': dpu_port['id']}])
+ op(server, *args, **kwargs)
+
+ def test_attach_interface(self):
+ self.start_compute()
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ server = self._create_server(networks='none')
+
+ self._attach_interface(server, dpu_port['id'])
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:82:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00002',
+ },
+ port['binding:profile'],
+ )
+
+ def test_detach_interface(self):
+ self._test_common(self._detach_interface, uuids.dpu_tunnel_port)
+
+ port = self.neutron.show_port(uuids.dpu_tunnel_port)['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({}, port['binding:profile'])
+
+ def test_shelve(self):
+ self._test_common(self._shelve_server)
+
+ port = self.neutron.show_port(uuids.dpu_tunnel_port)['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({}, port['binding:profile'])
+
+ def test_suspend(self):
+ self.start_compute()
+ dpu_port = self.create_remote_managed_tunnel_port()
+ server = self._create_server(networks=[{'port': dpu_port['id']}])
+ self._suspend_server(server)
+ # TODO(dmitriis): detachDevice does not properly handle hostdevs
+ # so full suspend/resume testing is problematic.
+
+ def _test_move_operation_with_neutron(self, move_operation, dpu_port):
+ """Test a move operation with a remote-managed port.
+ """
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=0, num_vfs=0)
+
+ compute1_pci_info.add_device(
+ dev_type='PF',
+ bus=0x80,
+ slot=0x0,
+ function=0,
+ iommu_group=84,
+ numa_node=1,
+ vf_ratio=self.vf_ratio,
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='a2d6',
+ prod_name='MT42822 BlueField-2 integrated ConnectX-6',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT42822 BlueField-2 integrated ConnectX-6',
+ 'readonly': {
+ 'serial_number': 'MT0000X00042',
+ },
+ }
+ )
+ for idx in range(self.NUM_VFS):
+ compute1_pci_info.add_device(
+ dev_type='VF',
+ bus=0x80,
+ slot=0x0,
+ function=idx + 1,
+ iommu_group=idx + 85,
+ numa_node=1,
+ vf_ratio=self.vf_ratio,
+ parent=(0x80, 0x0, 0),
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='101e',
+ prod_name='ConnectX Family mlx5Gen Virtual Function',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT42822 BlueField-2 integrated ConnectX-6',
+ 'readonly': {
+ 'serial_number': 'MT0000X00042',
+ },
+ }
+ )
+
+ self.start_compute(hostname='test_compute0')
+ self.start_compute(hostname='test_compute1',
+ pci_info=compute1_pci_info)
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertNotIn('binding:profile', port)
+
+ flavor_id = self._create_flavor(vcpu=4)
+ server = self._create_server(
+ flavor_id=flavor_id,
+ networks=[{'port': dpu_port['id']}],
+ host='test_compute0',
+ )
+
+ self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=5)
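+ # 10 = (1 PF + 4 VFs) * 2 on the source host; consuming one VF also
+ # makes its parent PF unavailable, hence free=8.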
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:82:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00002',
+ },
+ port['binding:profile'],
+ )
+
+ move_operation(server)
+
+ def test_unshelve_server_with_neutron(self):
+ def move_operation(source_server):
+ self._shelve_server(source_server)
+ # Disable the source compute, to force unshelving on the dest.
+ self.api.put_service(
+ self.computes['test_compute0'].service_ref.uuid,
+ {'status': 'disabled'})
+ self._unshelve_server(source_server)
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
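+ # The server now consumes a VF on test_compute1: the profile below
+ # shows bus 0x80 and serial MT0000X00042 instead of the original
+ # 0000:82:00.4 / MT0000X00002 on test_compute0.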
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
+
+ def test_cold_migrate_server_with_neutron(self):
+ def move_operation(source_server):
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ server = self._migrate_server(source_server)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ def test_cold_migrate_server_with_neutron_revert(self):
+ def move_operation(source_server):
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ server = self._migrate_server(source_server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=5)
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:82:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00002',
+ },
+ port['binding:profile'],
+ )
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ def test_evacuate_server_with_neutron(self):
+ def move_operation(source_server):
+ # Down the source compute to enable the evacuation
+ self.api.put_service(
+ self.computes['test_compute0'].service_ref.uuid,
+ {'forced_down': True})
+ self.computes['test_compute0'].stop()
+ self._evacuate_server(source_server)
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
+
+ def test_live_migrate_server_with_neutron(self):
+ """Live migrate an instance using a remote-managed port.
+
+ This should succeed since we support this via detach and attach of
+ the PCI device, similar to how this is done for SR-IOV ports.
+ """
+ def move_operation(source_server):
+ self._live_migrate(source_server, 'completed')
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
diff --git a/nova/tests/functional/libvirt/test_power_manage.py b/nova/tests/functional/libvirt/test_power_manage.py
new file mode 100644
index 0000000000..9f80446bd6
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_power_manage.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import fixtures
+
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import base
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import api as cpu_api
+
+
+class PowerManagementTestsBase(base.ServersTestBase):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter']
+
+ ADMIN_API = True
+
+ def setUp(self):
+ super(PowerManagementTestsBase, self).setUp()
+
+ self.ctxt = nova_context.get_admin_context()
+
+ # Wrap the 'NUMATopologyFilter' filter, as most tests need to inspect
+ # its calls
+ host_manager = self.scheduler.manager.host_manager
+ numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
+ host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
+ _p = mock.patch('nova.scheduler.filters'
+ '.numa_topology_filter.NUMATopologyFilter.host_passes',
+ side_effect=host_pass_mock)
+ self.mock_filter = _p.start()
+ self.addCleanup(_p.stop)
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.useFixture(nova_fixtures.PrivsepFixture())
+
+ # Define the main flavor with 4 vCPUs, all pinned
+ self.extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_thread_policy': 'prefer',
+ }
+ self.pcpu_flavor_id = self._create_flavor(
+ vcpu=4, extra_spec=self.extra_spec)
+
+ def _assert_server_cpus_state(self, server, expected='online'):
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ if not inst.numa_topology:
+ self.fail('Instance should have a NUMA topology in order to know '
+ 'its physical CPUs')
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ self._assert_cpu_set_state(instance_pcpus, expected=expected)
+ return instance_pcpus
+
+ def _assert_cpu_set_state(self, cpu_set, expected='online'):
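+ # NOTE: Core.online and Core.governor are backed by the fake sysfs
+ # tree of the SysFileSystemFixture used in the subclasses, roughly:
+ # /sys/devices/system/cpu/cpu<i>/online -> '0' / '1'
+ # /sys/devices/system/cpu/cpu<i>/cpufreq/scaling_governor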
+ for i in cpu_set:
+ core = cpu_api.Core(i)
+ if expected == 'online':
+ self.assertTrue(core.online, f'{i} is not online')
+ elif expected == 'offline':
+ self.assertFalse(core.online, f'{i} is online')
+ elif expected == 'powersave':
+ self.assertEqual('powersave', core.governor)
+ elif expected == 'performance':
+ self.assertEqual('performance', core.governor)
+
+
+class PowerManagementTests(PowerManagementTestsBase):
+ """Test suite for a single host with 9 dedicated cores and 1 used for OS"""
+
+ def setUp(self):
+ super(PowerManagementTests, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
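+ # i.e. 1 node * 1 socket * 5 cores * 2 threads = 10 logical CPUs
+ # (0-9), with CPU 0 left out of cpu_dedicated_set for the host OS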
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # All cores are shut down at startup; let's check.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ def test_hardstop_compute_service_if_wrong_opt(self):
+ self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.assertRaises(exception.InvalidConfiguration,
+ self.start_compute, host_info=self.host_info,
+ hostname='compute2')
+
+ def test_create_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Let's verify that the pinned CPUs are now online
+ self._assert_server_cpus_state(server, expected='online')
+
+ # Verify that the unused CPUs are still offline
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ unused_cpus = cpu_dedicated_set - instance_pcpus
+ self._assert_cpu_set_state(unused_cpus, expected='offline')
+
+ def test_stop_start_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+
+ server = self._stop_server(server)
+ # Let's verify that the pinned CPUs are now stopped...
+ self._assert_server_cpus_state(server, expected='offline')
+
+ server = self._start_server(server)
+ # ...and now, they should be back.
+ self._assert_server_cpus_state(server, expected='online')
+
+ def test_resize(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ server_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+
+ new_flavor_id = self._create_flavor(
+ vcpu=5, extra_spec=self.extra_spec)
+ self._resize_server(server, new_flavor_id)
+ server2_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+ # Even though the resize is not confirmed yet, the original guest is
+ # destroyed, so its cores are now offline.
+ self._assert_cpu_set_state(server_pcpus, expected='offline')
+
+ # let's revert the resize
+ self._revert_resize(server)
+ # So now the original CPUs are online again, while the resized
+ # flavor's cores are back offline.
+ self._assert_cpu_set_state(server_pcpus, expected='online')
+ self._assert_cpu_set_state(server2_pcpus, expected='offline')
+
+ def test_changing_strategy_fails(self):
+ # As a reminder, all dedicated cores were shut down at startup.
+ # Now change the strategy and restart the service.
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ # This is not possible, as we would be left with offline CPUs.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementTestsGovernor(PowerManagementTestsBase):
+ """Test suite for speific governor usage (same 10-core host)"""
+
+ def setUp(self):
+ super(PowerManagementTestsGovernor, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
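+ # 'governor' keeps cores online and switches their cpufreq governor
+ # between powersave and performance, unlike the default 'cpu_state'
+ # strategy which onlines/offlines the cores.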
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ def test_create(self):
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ # With the governor strategy, cores are still online but run with a
+ # powersave governor.
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='powersave')
+
+ # Now, start an instance
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # When pinned cores are in use, their governor is set to performance
+ self._assert_server_cpus_state(server, expected='performance')
+
+ def test_changing_strategy_fails(self):
+ # Arbitrarily set one core's governor to performance
+ cpu_api.Core(1).set_high_governor()
+ # and then forget about it while changing the strategy.
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ # This time it wouldn't be acceptable, as some core would have a
+ # different governor while Nova would only online/offline it.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementMixedInstances(PowerManagementTestsBase):
+ """Test suite for a single host with 6 dedicated cores, 3 shared and one
+ OS-restricted.
+ """
+
+ def setUp(self):
+ super(PowerManagementMixedInstances, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining 6 CPUs to be dedicated, not all of them contiguous.
+ self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # Make sure only the 6 dedicated cores are offline now
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ # cores 4 and 8-9 should be online
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ def test_standard_server_works_and_passes(self):
+
+ std_flavor_id = self._create_flavor(vcpu=2)
+ self._create_server(flavor_id=std_flavor_id, expected_state='ACTIVE')
+
+ # Since this is an instance with floating vCPUs on the shared set, we
+ # can only look up the host CPUs and see they haven't changed state.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ # We can now try to boot an instance with pinned CPUs to test the mix
+ pinned_server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # We'll see that its CPUs are now online
+ self._assert_server_cpus_state(pinned_server, expected='online')
+ # but it doesn't change the shared set
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
diff --git a/nova/tests/functional/libvirt/test_report_cpu_traits.py b/nova/tests/functional/libvirt/test_report_cpu_traits.py
index 2386ec5251..99e68b7b5c 100644
--- a/nova/tests/functional/libvirt/test_report_cpu_traits.py
+++ b/nova/tests/functional/libvirt/test_report_cpu_traits.py
@@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import os_resource_classes as orc
import os_traits as ost
-
from nova import conf
from nova.db import constants as db_const
from nova import test
@@ -190,7 +190,6 @@ class LibvirtReportNoSevTraitsTests(LibvirtReportTraitsTestBase):
class LibvirtReportSevTraitsTests(LibvirtReportTraitsTestBase):
STUB_INIT_HOST = False
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, True)
@test.patch_open(SEV_KERNEL_PARAM_FILE, "1\n")
@mock.patch.object(
fakelibvirt.virConnect, '_domain_capability_features',
@@ -198,7 +197,8 @@ class LibvirtReportSevTraitsTests(LibvirtReportTraitsTestBase):
def setUp(self):
super(LibvirtReportSevTraitsTests, self).setUp()
self.flags(num_memory_encrypted_guests=16, group='libvirt')
- self.start_compute()
+ with test.patch_exists(SEV_KERNEL_PARAM_FILE, True):
+ self.start_compute()
def test_sev_trait_on_off(self):
"""Test that the compute service reports the SEV trait in the list of
diff --git a/nova/tests/functional/libvirt/test_reshape.py b/nova/tests/functional/libvirt/test_reshape.py
index 5c73ffbf5f..1f924739e3 100644
--- a/nova/tests/functional/libvirt/test_reshape.py
+++ b/nova/tests/functional/libvirt/test_reshape.py
@@ -12,7 +12,7 @@
# under the License.
import io
-import mock
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
@@ -30,17 +30,7 @@ LOG = logging.getLogger(__name__)
class VGPUReshapeTests(base.ServersTestBase):
- @mock.patch('nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84})
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True)
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b''),
- io.BytesIO(b'')])
- def test_create_servers_with_vgpu(
- self, mock_file_open, mock_valid_hostname, mock_get_fs_info):
+ def test_create_servers_with_vgpu(self):
"""Verify that vgpu reshape works with libvirt driver
1) create two servers with an old tree where the VGPU resource is on
@@ -49,7 +39,8 @@ class VGPUReshapeTests(base.ServersTestBase):
3) check that the allocations of the servers are still valid
4) create another server now against the new tree
"""
-
+ self.mock_file_open.side_effect = [
+ io.BytesIO(b''), io.BytesIO(b''), io.BytesIO(b'')]
# NOTE(gibi): We cannot simply ask the virt driver to create an old
# RP tree with vgpu on the root RP as that code path does not exist
# any more. So we have to hack a "bit". We will create a compute
@@ -81,11 +72,11 @@ class VGPUReshapeTests(base.ServersTestBase):
# ignore the content of the above HostMdevDeviceInfo
self.flags(enabled_mdev_types='', group='devices')
- hostname = self.start_compute(
+ self.hostname = self.start_compute(
hostname='compute1',
mdev_info=fakelibvirt.HostMdevDevicesInfo(devices=mdevs),
)
- self.compute = self.computes[hostname]
+ self.compute = self.computes[self.hostname]
# create the VGPU resource in placement manually
compute_rp_uuid = self.placement.get(
@@ -167,7 +158,7 @@ class VGPUReshapeTests(base.ServersTestBase):
allocations[compute_rp_uuid]['resources'])
# restart compute which will trigger a reshape
- self.compute = self.restart_compute_service(self.compute)
+ self.compute = self.restart_compute_service(self.hostname)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_uefi.py b/nova/tests/functional/libvirt/test_uefi.py
index 1eee1ab5e1..40becf425e 100644
--- a/nova/tests/functional/libvirt/test_uefi.py
+++ b/nova/tests/functional/libvirt/test_uefi.py
@@ -14,6 +14,7 @@
# under the License.
import datetime
+import re
from lxml import etree
from oslo_log import log as logging
@@ -47,6 +48,8 @@ class UEFIServersTest(base.ServersTestBase):
orig_create = nova.virt.libvirt.guest.Guest.create
def fake_create(cls, xml, host):
+ xml = re.sub('type arch.*machine',
+ 'type machine', xml)
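+ # strip the arch attribute from the <type> element so the XML
+ # comparison below does not depend on the test host architecture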
tree = etree.fromstring(xml)
self.assertXmlEqual(
"""
diff --git a/nova/tests/functional/libvirt/test_vgpu.py b/nova/tests/functional/libvirt/test_vgpu.py
index f25ce44221..686582120a 100644
--- a/nova/tests/functional/libvirt/test_vgpu.py
+++ b/nova/tests/functional/libvirt/test_vgpu.py
@@ -49,11 +49,11 @@ class VGPUTestBase(base.ServersTestBase):
def setUp(self):
super(VGPUTestBase, self).setUp()
- self.useFixture(fixtures.MockPatch(
- 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84}))
+ libvirt_driver.LibvirtDriver._get_local_gb_info.return_value = {
+ 'total': 128,
+ 'used': 44,
+ 'free': 84,
+ }
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.create_mdev',
side_effect=self._create_mdev))
@@ -113,8 +113,8 @@ class VGPUTestBase(base.ServersTestBase):
parent=libvirt_parent)})
return uuid
- def start_compute(self, hostname):
- hostname = super().start_compute(
+ def start_compute_with_vgpu(self, hostname):
+ hostname = self.start_compute(
pci_info=fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
),
@@ -197,7 +197,7 @@ class VGPUTests(VGPUTestBase):
enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
group='devices')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def assert_vgpu_usage_for_compute(self, compute, expected):
self.assert_mdev_usage(compute, expected_amount=expected)
@@ -211,7 +211,7 @@ class VGPUTests(VGPUTestBase):
def test_resize_servers_with_vgpu(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
server = self._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor, host=self.compute1.host,
@@ -337,7 +337,7 @@ class VGPUMultipleTypesTests(VGPUTestBase):
# Prepare traits for later on
self._create_trait('CUSTOM_NVIDIA_11')
self._create_trait('CUSTOM_NVIDIA_12')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def test_create_servers_with_vgpu(self):
self._create_server(
@@ -369,13 +369,12 @@ class VGPUMultipleTypesTests(VGPUTestBase):
def test_create_servers_with_specific_type(self):
# Regenerate the PCI addresses so both pGPUs now support nvidia-12
- connection = self.computes[
- self.compute1.host].driver._host.get_connection()
- connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
multiple_gpu_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service(
+ self.compute1.host, pci_info=pci_info, keep_hypervisor_state=False)
pgpu1_rp_uuid = self._get_provider_uuid_by_name(
self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV1_PCI_ADDR)
pgpu2_rp_uuid = self._get_provider_uuid_by_name(
@@ -451,7 +450,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
group='mdev_nvidia-12')
self.flags(mdev_class='CUSTOM_NOTVGPU', group='mdev_mlx5_core')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -460,7 +459,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service('host1')
def test_create_servers_with_different_mdev_classes(self):
physdev1_rp_uuid = self._get_provider_uuid_by_name(
@@ -498,7 +497,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
def test_resize_servers_with_mlx5(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -507,7 +506,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute2 = self.restart_compute_service(self.compute2)
+ self.compute2 = self.restart_compute_service('host2')
# Use the new flavor for booting
server = self._create_server(
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index d1cad0e376..cb524fe8b6 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -12,9 +12,11 @@
# under the License.
import fixtures
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.tests import fixtures as nova_fixtures
@@ -75,6 +77,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
'nova.privsep.libvirt.get_pmem_namespaces',
return_value=self.fake_pmem_namespaces))
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128,
@@ -99,7 +102,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
- compute = self._start_compute(host=hostname)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ compute = self._start_compute(host=hostname)
# Ensure populating the existing pmems correctly.
vpmems = compute.driver._vpmems_by_name
diff --git a/nova/tests/functional/libvirt/test_vtpm.py b/nova/tests/functional/libvirt/test_vtpm.py
index c07c38f02d..3b5ae9a60f 100644
--- a/nova/tests/functional/libvirt/test_vtpm.py
+++ b/nova/tests/functional/libvirt/test_vtpm.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from castellan.common.objects import passphrase
from castellan.key_manager import key_manager
@@ -128,7 +128,7 @@ class VTPMServersTest(base.ServersTestBase):
# the presence of users on the host, none of which makes sense here
_p = mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver._check_vtpm_support')
- self.mock_conn = _p.start()
+ _p.start()
self.addCleanup(_p.stop)
self.key_mgr = crypto._get_key_manager()
diff --git a/nova/tests/functional/notification_sample_tests/notification_sample_base.py b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
index 72291e55cd..d987ff127c 100644
--- a/nova/tests/functional/notification_sample_tests/notification_sample_base.py
+++ b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
@@ -12,9 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os
import time
+from unittest import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
diff --git a/nova/tests/functional/notification_sample_tests/test_compute_task.py b/nova/tests/functional/notification_sample_tests/test_compute_task.py
index 3de1c7d4e1..05d2d32fde 100644
--- a/nova/tests/functional/notification_sample_tests/test_compute_task.py
+++ b/nova/tests/functional/notification_sample_tests/test_compute_task.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova import objects
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
@@ -53,6 +56,10 @@ class TestComputeTaskNotificationSample(
},
actual=self.notifier.versioned_notifications[1])
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index 710b2a71fb..5a52c2dad6 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -11,8 +11,8 @@
# under the License.
import time
+from unittest import mock
-import mock
from nova import exception
from nova.tests import fixtures
@@ -46,18 +46,18 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.compute2 = self.start_service('compute', host='host2')
actions = [
- self._test_live_migration_rollback,
- self._test_live_migration_abort,
- self._test_live_migration_success,
- self._test_evacuate_server,
- self._test_live_migration_force_complete
+ (self._test_live_migration_rollback, 'ACTIVE'),
+ (self._test_live_migration_abort, 'ACTIVE'),
+ (self._test_live_migration_success, 'ACTIVE'),
+ (self._test_evacuate_server, 'SHUTOFF'),
+ (self._test_live_migration_force_complete, 'ACTIVE'),
]
- for action in actions:
+ for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
- self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@@ -193,7 +193,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.delete_migration(server['id'], migrations[0]['id'])
self._wait_for_notification('instance.live_migration_abort.start')
self._wait_for_state_change(server, 'ACTIVE')
- # NOTE(gibi): the intance.live_migration_rollback notification emitted
+ # NOTE(gibi): the instance.live_migration_rollback notification emitted
# after the instance.live_migration_abort notification so we have to
# wait for the rollback to ensure we can assert both notifications
# below
@@ -275,6 +275,12 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
+ # In this scenario an evacuate happened previously, which stopped
+ # the server.
+ self._start_server(server)
+ self._wait_for_state_change(server, 'ACTIVE')
+ self.notifier.reset()
+
post = {
'os-migrateLive': {
'host': 'host2',
@@ -1231,7 +1237,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.8',
+ 'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
@@ -1327,7 +1333,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.8',
+ 'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
@@ -1500,8 +1506,8 @@ class TestInstanceNotificationSample(
self.api.delete_server_volume(server['id'], volume_id)
self._wait_for_notification('instance.volume_detach.end')
- def _volume_swap_server(self, server, attachement_id, volume_id):
- self.api.put_server_volume(server['id'], attachement_id, volume_id)
+ def _volume_swap_server(self, server, attachment_id, volume_id):
+ self.api.put_server_volume(server['id'], attachment_id, volume_id)
def test_volume_swap_server(self):
server = self._boot_a_server(
diff --git a/nova/tests/functional/notification_sample_tests/test_keypair.py b/nova/tests/functional/notification_sample_tests/test_keypair.py
index b2481f1b2a..01c59b0f36 100644
--- a/nova/tests/functional/notification_sample_tests/test_keypair.py
+++ b/nova/tests/functional/notification_sample_tests/test_keypair.py
@@ -16,7 +16,12 @@ from nova.tests.functional.notification_sample_tests \
class TestKeypairNotificationSample(
notification_sample_base.NotificationSampleTestBase):
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
def test_keypair_create_delete(self):
+ # Keypair generation is no longer supported as of microversion 2.92.
+ self.api.microversion = '2.91'
keypair_req = {
"keypair": {
"name": "my-key",
diff --git a/nova/tests/functional/notification_sample_tests/test_libvirt.py b/nova/tests/functional/notification_sample_tests/test_libvirt.py
index 8106edd44a..feed05a64c 100644
--- a/nova/tests/functional/notification_sample_tests/test_libvirt.py
+++ b/nova/tests/functional/notification_sample_tests/test_libvirt.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import nova.conf
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1554631.py b/nova/tests/functional/regressions/test_bug_1554631.py
index 2db5e37b91..25a4613e72 100644
--- a/nova/tests/functional/regressions/test_bug_1554631.py
+++ b/nova/tests/functional/regressions/test_bug_1554631.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import exceptions as cinder_exceptions
-import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/regressions/test_bug_1595962.py b/nova/tests/functional/regressions/test_bug_1595962.py
index ebdf82f21a..9232eea335 100644
--- a/nova/tests/functional/regressions/test_bug_1595962.py
+++ b/nova/tests/functional/regressions/test_bug_1595962.py
@@ -13,10 +13,10 @@
# under the License.
import time
+from unittest import mock
import fixtures
import io
-import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -47,6 +47,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.admin_api = api_fixture.admin_api
self.api = api_fixture.api
diff --git a/nova/tests/functional/regressions/test_bug_1628606.py b/nova/tests/functional/regressions/test_bug_1628606.py
new file mode 100644
index 0000000000..0fccd78cce
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1628606.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+from unittest import mock
+
+
+class PostLiveMigrationFail(
+ test.TestCase, integrated_helpers.InstanceHelperMixin):
+ """Regression test for bug 1628606
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
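+ # HostNameWeigherFixture makes the scheduler prefer hosts in name
+ # order, so the server deterministically lands on host1 first.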
+
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager'
+ '._post_live_migration_remove_source_vol_connections')
+ def test_post_live_migration(self, mock_migration):
+ server = self._create_server(networks=[])
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ error = client.OpenStackApiException(
+ "Failed to remove source vol connection post live migration")
+ mock_migration.side_effect = error
+
+ server = self._live_migrate(
+ server, migration_expected_state='error',
+ server_expected_state='ERROR')
+
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 6180dbfbaa..b20e1530cc 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -59,7 +59,8 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Now try to evacuate the server back to the original source compute.
server = self._evacuate_server(
server, {'onSharedStorage': 'False'},
- expected_host=self.compute.host, expected_migration_status='done')
+ expected_host=self.compute.host, expected_migration_status='done',
+ expected_state='ACTIVE')
# Assert the RequestSpec.ignore_hosts field is not populated.
reqspec = objects.RequestSpec.get_by_instance_uuid(
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 9a6a79d7a2..8088ccfe06 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -13,9 +13,11 @@
# limitations under the License.
import time
+from unittest import mock
from oslo_log import log as logging
+from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -81,6 +83,10 @@ class FailedEvacuateStateTests(test.TestCase,
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(created_server, 'ACTIVE')
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_evacuate_no_valid_host(self):
# Boot a server
server = self._boot_a_server()
diff --git a/nova/tests/functional/regressions/test_bug_1732947.py b/nova/tests/functional/regressions/test_bug_1732947.py
index 3637f40bc2..db518fa8ce 100644
--- a/nova/tests/functional/regressions/test_bug_1732947.py
+++ b/nova/tests/functional/regressions/test_bug_1732947.py
@@ -28,7 +28,9 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase):
original image.
"""
api_major_version = 'v2.1'
- microversion = 'latest'
+ # We need a microversion < 2.93 to get the old BFV rebuild behavior
+ # that was the environment for this regression.
+ microversion = '2.92'
def _setup_scheduler_service(self):
# Add the IsolatedHostsFilter to the list of enabled filters since it
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index aa86770584..59bbed4f46 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -95,7 +95,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Evacuate the instance from the source_host
server = self._evacuate_server(
- server, expected_migration_status='done')
+ server, expected_migration_status='done',
+ expected_state='ACTIVE')
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1781286.py b/nova/tests/functional/regressions/test_bug_1781286.py
index 7b2d603092..c123fd9214 100644
--- a/nova/tests/functional/regressions/test_bug_1781286.py
+++ b/nova/tests/functional/regressions/test_bug_1781286.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+
from oslo_db import exception as oslo_db_exc
from nova.compute import manager as compute_manager
@@ -67,11 +67,11 @@ class RescheduleBuildAvailabilityZoneUpCall(
def wrap_bari(*args, **kwargs):
# Poison the AZ query to blow up as if the cell conductor does not
# have access to the API DB.
- self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.AggregateList.get_by_host',
- side_effect=oslo_db_exc.CantStartEngineError))
- return original_bari(*args, **kwargs)
+ with mock.patch(
+ 'nova.objects.AggregateList.get_by_host',
+ side_effect=oslo_db_exc.CantStartEngineError
+ ):
+ return original_bari(*args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager.'
'build_and_run_instance', wrap_bari)
@@ -81,10 +81,6 @@ class RescheduleBuildAvailabilityZoneUpCall(
# compute service we have to wait for the notification that the build
# is complete and then stop the mock so we can use the API again.
self.notifier.wait_for_versioned_notifications('instance.create.end')
- # Note that we use stopall here because we actually called
- # build_and_run_instance twice so we have more than one instance of
- # the mock that needs to be stopped.
- mock.patch.stopall()
server = self._wait_for_state_change(server, 'ACTIVE')
# We should have rescheduled and the instance AZ should be set from the
# Selection object. Since neither compute host is in an AZ, the server
@@ -128,19 +124,20 @@ class RescheduleMigrateAvailabilityZoneUpCall(
self.rescheduled = None
def wrap_prep_resize(_self, *args, **kwargs):
- # Poison the AZ query to blow up as if the cell conductor does not
- # have access to the API DB.
- self.agg_mock = self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.AggregateList.get_by_host',
- side_effect=oslo_db_exc.CantStartEngineError)).mock
if self.rescheduled is None:
# Track the first host that we rescheduled from.
self.rescheduled = _self.host
# Trigger a reschedule.
raise exception.ComputeResourcesUnavailable(
reason='test_migrate_reschedule_blocked_az_up_call')
- return original_prep_resize(_self, *args, **kwargs)
+ # Poison the AZ query to blow up as if the cell conductor does not
+ # have access to the API DB.
+ with mock.patch(
+ 'nova.objects.AggregateList.get_by_host',
+ side_effect=oslo_db_exc.CantStartEngineError,
+ ) as agg_mock:
+ self.agg_mock = agg_mock
+ return original_prep_resize(_self, *args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
wrap_prep_resize)
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index 5e69905f5f..af134070cd 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -66,4 +66,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# higher than host3.
self._evacuate_server(
server, {'onSharedStorage': 'False'}, expected_host='host3',
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1830747.py b/nova/tests/functional/regressions/test_bug_1830747.py
index 4cd8c3b1af..a28c896b99 100644
--- a/nova/tests/functional/regressions/test_bug_1830747.py
+++ b/nova/tests/functional/regressions/test_bug_1830747.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.conductor import api as conductor_api
from nova import context as nova_context
diff --git a/nova/tests/functional/regressions/test_bug_1831771.py b/nova/tests/functional/regressions/test_bug_1831771.py
index 2ec448b249..11e3ec7682 100644
--- a/nova/tests/functional/regressions/test_bug_1831771.py
+++ b/nova/tests/functional/regressions/test_bug_1831771.py
@@ -13,8 +13,8 @@
# under the License.
import collections
+from unittest import mock
-import mock
from nova.compute import task_states
from nova.compute import vm_states
diff --git a/nova/tests/functional/regressions/test_bug_1843090.py b/nova/tests/functional/regressions/test_bug_1843090.py
index ed02d59cb4..72793cc0bc 100644
--- a/nova/tests/functional/regressions/test_bug_1843090.py
+++ b/nova/tests/functional/regressions/test_bug_1843090.py
@@ -9,7 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.compute
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1843708.py b/nova/tests/functional/regressions/test_bug_1843708.py
index 2ae725a5eb..2eda92125b 100644
--- a/nova/tests/functional/regressions/test_bug_1843708.py
+++ b/nova/tests/functional/regressions/test_bug_1843708.py
@@ -15,6 +15,7 @@
from nova import context
from nova import objects
from nova.tests.functional import integrated_helpers
+from nova.tests.unit import fake_crypto
class RebuildWithKeypairTestCase(integrated_helpers._IntegratedTestBase):
@@ -26,14 +27,19 @@ class RebuildWithKeypairTestCase(integrated_helpers._IntegratedTestBase):
microversion = 'latest'
def test_rebuild_with_keypair(self):
+ pub_key1 = fake_crypto.get_ssh_public_key()
+
keypair_req = {
'keypair': {
'name': 'test-key1',
'type': 'ssh',
+ 'public_key': pub_key1,
},
}
keypair1 = self.api.post_keypair(keypair_req)
+ pub_key2 = fake_crypto.get_ssh_public_key()
keypair_req['keypair']['name'] = 'test-key2'
+ keypair_req['keypair']['public_key'] = pub_key2
keypair2 = self.api.post_keypair(keypair_req)
server = self._build_server(networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1845291.py b/nova/tests/functional/regressions/test_bug_1845291.py
index 101774416a..e5e9c953a6 100644
--- a/nova/tests/functional/regressions/test_bug_1845291.py
+++ b/nova/tests/functional/regressions/test_bug_1845291.py
@@ -9,7 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1849165.py b/nova/tests/functional/regressions/test_bug_1849165.py
index f2a7f82ee9..1d4cf2eece 100644
--- a/nova/tests/functional/regressions/test_bug_1849165.py
+++ b/nova/tests/functional/regressions/test_bug_1849165.py
@@ -9,7 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova
from nova.tests.functional import integrated_helpers
diff --git a/nova/tests/functional/regressions/test_bug_1853009.py b/nova/tests/functional/regressions/test_bug_1853009.py
index 2ec69482a2..5266e6166b 100644
--- a/nova/tests/functional/regressions/test_bug_1853009.py
+++ b/nova/tests/functional/regressions/test_bug_1853009.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import objects
diff --git a/nova/tests/functional/regressions/test_bug_1862633.py b/nova/tests/functional/regressions/test_bug_1862633.py
index 5cfcc75ab2..021093cf59 100644
--- a/nova/tests/functional/regressions/test_bug_1862633.py
+++ b/nova/tests/functional/regressions/test_bug_1862633.py
@@ -9,8 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from neutronclient.common import exceptions as neutron_exception
+from unittest import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/regressions/test_bug_1879878.py b/nova/tests/functional/regressions/test_bug_1879878.py
index 3a21c5c11d..c50f8ac92e 100644
--- a/nova/tests/functional/regressions/test_bug_1879878.py
+++ b/nova/tests/functional/regressions/test_bug_1879878.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from nova.compute import resource_tracker as rt
from nova import context as nova_context
diff --git a/nova/tests/functional/regressions/test_bug_1888395.py b/nova/tests/functional/regressions/test_bug_1888395.py
index 36eb0e0f52..c50b78e2f6 100644
--- a/nova/tests/functional/regressions/test_bug_1888395.py
+++ b/nova/tests/functional/regressions/test_bug_1888395.py
@@ -23,14 +23,8 @@ from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.functional.libvirt import base as libvirt_base
-class TestLiveMigrationWithoutMultiplePortBindings(
+class TestLiveMigrationWithoutMultiplePortBindingsBase(
libvirt_base.ServersTestBase):
- """Regression test for bug 1888395.
-
- This regression test asserts that Live migration works when
- neutron does not support the binding-extended api extension
- and the legacy single port binding workflow is used.
- """
ADMIN_API = True
microversion = 'latest'
@@ -72,6 +66,16 @@ class TestLiveMigrationWithoutMultiplePortBindings(
'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
self._migrate_stub))
+
+class TestLiveMigrationWithoutMultiplePortBindings(
+ TestLiveMigrationWithoutMultiplePortBindingsBase):
+ """Regression test for bug 1888395.
+
+ This regression test asserts that Live migration works when
+ neutron does not support the binding-extended api extension
+ and the legacy single port binding workflow is used.
+ """
+
def _migrate_stub(self, domain, destination, params, flags):
"""Stub out migrateToURI3."""
@@ -108,7 +112,7 @@ class TestLiveMigrationWithoutMultiplePortBindings(
networks=[{'port': self.neutron.port_1['id']}])
self.assertFalse(
- self.neutron_api.supports_port_binding_extension(self.ctxt))
+ self.neutron_api.has_port_binding_extension(self.ctxt))
# TODO(sean-k-mooney): extend _live_migrate to support passing a host
self.api.post_server_action(
server['id'],
@@ -124,3 +128,25 @@ class TestLiveMigrationWithoutMultiplePortBindings(
server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
msg = "NotImplementedError: Cannot load 'vif_type' in the base class"
self.assertNotIn(msg, self.stdlog.logger.output)
+
+
+class TestLiveMigrationRollbackWithoutMultiplePortBindings(
+ TestLiveMigrationWithoutMultiplePortBindingsBase):
+
+ def _migrate_stub(self, domain, destination, params, flags):
+ source = self.computes['start_host']
+ conn = source.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(self.server['id'])
+ dom.fail_job()
+
+ def test_live_migration_rollback(self):
+ self.server = self._create_server(
+ host='start_host',
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ self.assertFalse(
+ self.neutron_api.has_port_binding_extension(self.ctxt))
+ # NOTE(artom) The live migration will still fail (we fail it in
+        # _migrate_stub()), but the server should correctly roll back to
+        # ACTIVE.
+ self._live_migrate(self.server, migration_expected_state='failed',
+ server_expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1889108.py b/nova/tests/functional/regressions/test_bug_1889108.py
index 0e847e81ab..9ec67e4bf7 100644
--- a/nova/tests/functional/regressions/test_bug_1889108.py
+++ b/nova/tests/functional/regressions/test_bug_1889108.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/regressions/test_bug_1890244.py b/nova/tests/functional/regressions/test_bug_1890244.py
new file mode 100644
index 0000000000..bf969eebe7
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1890244.py
@@ -0,0 +1,96 @@
+# Copyright 2017 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nova import context
+from nova import objects
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+
+
+class IgnoreDeletedServerGroupsTest(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Regression test for bug 1890244
+
+    If instances are created as members of a server group, it
+    should be possible to evacuate them if the server group is
+    deleted prior to the host failure.
+ """
+
+ def setUp(self):
+ super().setUp()
+ # Stub out external dependencies.
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ # Start nova controller services.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+ self.api = api_fixture.admin_api
+ self.start_service('conductor')
+ # Use a custom weigher to make sure that we have a predictable
+ # scheduling sort order.
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+ self.start_service('scheduler')
+ # Start two computes, one where the server will be created and another
+ # where we'll evacuate it to.
+ self.src = self._start_compute('host1')
+ self.dest = self._start_compute('host2')
+ self.notifier = self.useFixture(
+ nova_fixtures.NotificationFixture(self)
+ )
+
+ def test_evacuate_after_group_delete(self):
+ # Create an anti-affinity group for the server.
+ body = {
+ 'server_group': {
+ 'name': 'test-group',
+ 'policies': ['anti-affinity']
+ }
+ }
+ group_id = self.api.api_post(
+ '/os-server-groups', body).body['server_group']['id']
+
+ # Create a server in the group which should land on host1 due to our
+ # custom weigher.
+ body = {'server': self._build_server()}
+ body['os:scheduler_hints'] = {'group': group_id}
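+        # Note that 'os:scheduler_hints' is a top-level sibling of 'server'
+        # in the request body, not nested inside it.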
+ server = self.api.post_server(body)
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
+
+ # Down the source compute to enable the evacuation
+ self.api.microversion = '2.11' # Cap for the force-down call.
+ self.api.force_down_service('host1', 'nova-compute', True)
+ self.api.microversion = 'latest'
+ self.src.stop()
+
+ # assert the server currently has a server group
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context.get_admin_context(), server['id'])
+ self.assertIsNotNone(reqspec.instance_group)
+ self.assertIn('group', reqspec.scheduler_hints)
+ # then delete it so that we need to clean it up on evac
+ self.api.api_delete(f'/os-server-groups/{group_id}')
+
+ # Initiate evacuation
+ server = self._evacuate_server(
+ server, expected_host='host2', expected_migration_status='done'
+ )
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context.get_admin_context(), server['id'])
+ self.assertIsNone(reqspec.instance_group)
+ self.assertNotIn('group', reqspec.scheduler_hints)
diff --git a/nova/tests/functional/regressions/test_bug_1893284.py b/nova/tests/functional/regressions/test_bug_1893284.py
index b7ca848c4d..ccb12f50b7 100644
--- a/nova/tests/functional/regressions/test_bug_1893284.py
+++ b/nova/tests/functional/regressions/test_bug_1893284.py
@@ -25,7 +25,7 @@ class TestServersPerUserQuota(test.TestCase,
tracking usages in a separate database table. As part of that change,
per-user quota functionality was broken for server creates.
- When mulitple users in the same project have per-user quota, they are meant
+ When multiple users in the same project have per-user quota, they are meant
to be allowed to create resources such that may not exceed their
per-user quota nor their project quota.
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index dc74791e0e..3cfece8d36 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -216,7 +216,7 @@ class TestEvacuateResourceTrackerRace(
self._run_periodics()
self._wait_for_server_parameter(
- server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
+ server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'SHUTOFF'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1899835.py b/nova/tests/functional/regressions/test_bug_1899835.py
index 4713763f0f..ad4d315659 100644
--- a/nova/tests/functional/regressions/test_bug_1899835.py
+++ b/nova/tests/functional/regressions/test_bug_1899835.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import objects
diff --git a/nova/tests/functional/regressions/test_bug_1902925.py b/nova/tests/functional/regressions/test_bug_1902925.py
index f0e823e2a4..59105c6cc6 100644
--- a/nova/tests/functional/regressions/test_bug_1902925.py
+++ b/nova/tests/functional/regressions/test_bug_1902925.py
@@ -28,6 +28,11 @@ class ComputeVersion5xPinnedRpcTests(integrated_helpers._IntegratedTestBase):
self.compute1 = self._start_compute(host='host1')
def _test_rebuild_instance_with_compute_rpc_pin(self, version_cap):
+        # Passing the latest microversion (>= 2.93) sends the
+        # 'reimage_boot_volume' parameter as True, which is not
+        # supported by the capped compute RPC version (it requires
+        # 6.1), so these tests would fail. Pin the microversion to 2.92.
+ self.api.microversion = '2.92'
self.flags(compute=version_cap, group='upgrade_levels')
server_req = self._build_server(networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1914777.py b/nova/tests/functional/regressions/test_bug_1914777.py
index d8c9f5e15f..470c852669 100644
--- a/nova/tests/functional/regressions/test_bug_1914777.py
+++ b/nova/tests/functional/regressions/test_bug_1914777.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context as nova_context
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1922053.py b/nova/tests/functional/regressions/test_bug_1922053.py
index 612be27b2b..70bb3d4cab 100644
--- a/nova/tests/functional/regressions/test_bug_1922053.py
+++ b/nova/tests/functional/regressions/test_bug_1922053.py
@@ -1,3 +1,4 @@
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -27,6 +28,7 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
ADMIN_API = True
microversion = 'latest'
+ expected_state = 'SHUTOFF'
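+    # Expected server state after the evacuation below; the v2.52 subclass
+    # ForceUpWithDoneEvacuationsv252 overrides this to 'ACTIVE'.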
def _create_test_server(self, compute_host):
return self._create_server(host=compute_host, networks='none')
@@ -59,7 +61,8 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
server = self._evacuate_server(
server,
expected_host='compute2',
- expected_migration_status='done'
+ expected_migration_status='done',
+ expected_state=self.expected_state
)
# Assert that the request to force up the host is rejected
@@ -97,6 +100,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""
microversion = '2.52'
+ expected_state = 'ACTIVE'
def _create_test_server(self, compute_host):
return self._create_server(az='nova:compute', networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1928063.py b/nova/tests/functional/regressions/test_bug_1928063.py
index b1b1d36e16..94d7b8122c 100644
--- a/nova/tests/functional/regressions/test_bug_1928063.py
+++ b/nova/tests/functional/regressions/test_bug_1928063.py
@@ -11,7 +11,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -30,7 +30,6 @@ class TestSEVInstanceReboot(base.ServersTestBase):
"""
microversion = 'latest'
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, True)
@test.patch_open(SEV_KERNEL_PARAM_FILE, "1\n")
@mock.patch.object(
fakelibvirt.virConnect, '_domain_capability_features',
@@ -40,7 +39,8 @@ class TestSEVInstanceReboot(base.ServersTestBase):
# Configure the compute to allow SEV based instances and then start
self.flags(num_memory_encrypted_guests=16, group='libvirt')
- self.start_compute()
+ with test.patch_exists(SEV_KERNEL_PARAM_FILE, True):
+ self.start_compute()
# Create a SEV enabled image for the test
sev_image = copy.deepcopy(self.glance.image1)
diff --git a/nova/tests/functional/regressions/test_bug_1937084.py b/nova/tests/functional/regressions/test_bug_1937084.py
index 3ef432ae5e..bec3c9f5cb 100644
--- a/nova/tests/functional/regressions/test_bug_1937084.py
+++ b/nova/tests/functional/regressions/test_bug_1937084.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1937375.py b/nova/tests/functional/regressions/test_bug_1937375.py
index 860d80acd7..13a1f5c4c9 100644
--- a/nova/tests/functional/regressions/test_bug_1937375.py
+++ b/nova/tests/functional/regressions/test_bug_1937375.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1944619.py b/nova/tests/functional/regressions/test_bug_1944619.py
new file mode 100644
index 0000000000..430a6e3981
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1944619.py
@@ -0,0 +1,76 @@
+# Copyright 2021, Canonical, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception as nova_exceptions
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class TestRollbackWithHWOffloadedOVS(
+ base.LibvirtMigrationMixin,
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+ """Regression test for bug LP#1944619
+
+    Assert the behaviour observed in bug LP#1944619, caused by the live
+    migration cleanup code being used to clean up pre-live-migration
+    failures. When SR-IOV devices are in use on a VM, this makes the source
+    host try to re-attach a VIF that was never actually detached, causing a
+    failure.
+
+    The exception mocked in pre_live_migration reproduces an arbitrary
+    error that might cause the pre-live-migration process to fail, while
+    rollback_live_migration_at_source reproduces the device re-attach
+    failure.
+ """
+
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+ ADMIN_API = True
+
+ def setUp(self):
+ super().setUp()
+
+ self.start_compute(
+ hostname='src',
+ host_info=fakelibvirt.HostInfo(
+ cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
+ self.start_compute(
+ hostname='dest',
+ host_info=fakelibvirt.HostInfo(
+ cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
+
+ self.src = self.computes['src']
+ self.dest = self.computes['dest']
+
+ def test_rollback_pre_live_migration(self):
+ self.server = self._create_server(host='src', networks='none')
+
+ lib_path = "nova.virt.libvirt.driver.LibvirtDriver"
+ funtion_path = "pre_live_migration"
+ mock_lib_path_prelive = "%s.%s" % (lib_path, funtion_path)
+ with mock.patch(mock_lib_path_prelive,
+ side_effect=nova_exceptions.DestinationDiskExists(
+ path='/var/non/existent')) as mlpp:
+ funtion_path = "rollback_live_migration_at_source"
+ mock_lib_path_rollback = "%s.%s" % (lib_path, funtion_path)
+ with mock.patch(mock_lib_path_rollback) as mlpr:
+ # Live migrate the instance to another host
+ self._live_migrate(self.server,
+ migration_expected_state='failed',
+ server_expected_state='ACTIVE')
+ mlpr.assert_not_called()
+ mlpp.assert_called_once()
diff --git a/nova/tests/functional/regressions/test_bug_1951656.py b/nova/tests/functional/regressions/test_bug_1951656.py
new file mode 100644
index 0000000000..d705ff6fe3
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1951656.py
@@ -0,0 +1,73 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_vgpu
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class VGPUTestsLibvirt7_7(test_vgpu.VGPUTestBase):
+
+ def _create_mdev(self, physical_device, mdev_type, uuid=None):
+ # We need to fake the newly created sysfs object by adding a new
+        # FakeMdevDevice to the existing persisted Connection object so
+        # that, when we ask for the existing mdevs, we see it.
+ if not uuid:
+ uuid = uuidutils.generate_uuid()
+ mdev_name = libvirt_utils.mdev_uuid2name(uuid)
+ libvirt_parent = self.pci2libvirt_address(physical_device)
+
+ # Libvirt 7.7 now creates mdevs with a parent_addr suffix.
+ new_mdev_name = '_'.join([mdev_name, libvirt_parent])
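+        # e.g. 'mdev_<uuid_with_underscores>_<parent_address>' (an
+        # illustrative format only).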
+
+        # Here we get the right compute thanks to self._current_host, which
+        # was modified just before.
+ connection = self.computes[
+ self._current_host].driver._host.get_connection()
+ connection.mdev_info.devices.update(
+ {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=new_mdev_name,
+ type_id=mdev_type,
+ parent=libvirt_parent)})
+ return uuid
+
+ def setUp(self):
+ super(VGPUTestsLibvirt7_7, self).setUp()
+ extra_spec = {"resources:VGPU": "1"}
+ self.flavor = self._create_flavor(extra_spec=extra_spec)
+
+ # Start compute1 supporting only nvidia-11
+ self.flags(
+ enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+ group='devices')
+
+ self.compute1 = self.start_compute_with_vgpu('host1')
+
+ def test_create_servers_with_vgpu(self):
+
+ # Create a single instance against a specific compute node.
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=1)
+
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=2)
diff --git a/nova/tests/functional/regressions/test_bug_1978983.py b/nova/tests/functional/regressions/test_bug_1978983.py
new file mode 100644
index 0000000000..51465900da
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1978983.py
@@ -0,0 +1,71 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+
+
+class EvacuateServerWithTaskState(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Regression test for bug 1978983
+ If instance task state is powering-off or not None
+ instance should be allowed to evacuate.
+ """
+
+ def setUp(self):
+ super().setUp()
+ # Stub out external dependencies.
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+
+ # Start nova controller services.
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+ self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ def test_evacuate_instance(self):
+ """Evacuating a server
+ """
+ server = self._create_server(networks=[])
+
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ # stop host1 compute service
+ self.src.stop()
+ self.api.put_service_force_down(self.src.service_ref.uuid, True)
+
+ # poweroff instance
+ self._stop_server(server, wait_for_stop=False)
+ server = self._wait_for_server_parameter(
+ server, {'OS-EXT-STS:task_state': 'powering-off'})
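+        # The task_state stays at 'powering-off' because the source compute
+        # is down and will never complete the stop request.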
+
+ # evacuate instance
+ server = self._evacuate_server(
+ server, expected_host=self.dest.host
+ )
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1980720.py b/nova/tests/functional/regressions/test_bug_1980720.py
new file mode 100644
index 0000000000..ad2e6e6ba2
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1980720.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2022 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+from unittest import mock
+
+
+class LibvirtDriverTests(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
+ def setUp(self):
+ super(LibvirtDriverTests, self).setUp()
+ self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
+ self.start_compute()
+
+ def _create_server_with_block_device(self):
+ server_request = self._build_server(
+ networks=[],
+ )
+ # removing imageRef is required as we want
+ # to boot from volume
+ server_request.pop('imageRef')
+ server_request['block_device_mapping_v2'] = [{
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL_QUIESCE,
+ 'destination_type': 'volume'}]
+
+ server = self.api.post_server({
+ 'server': server_request,
+ })
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server
+
+ def test_snapshot_quiesce_fail(self):
+ server = self._create_server_with_block_device()
+ with mock.patch.object(
+ nova_fixtures.libvirt.Domain, 'fsFreeze'
+ ) as mock_obj:
+ ex = nova_fixtures.libvirt.libvirtError("Error")
+ ex.err = (nova_fixtures.libvirt.VIR_ERR_AGENT_UNRESPONSIVE,)
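+            # An unresponsive guest agent during quiesce is expected to be
+            # surfaced by the API as a 409 Conflict, asserted below.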
+
+ mock_obj.side_effect = ex
+ excep = self.assertRaises(
+ client.OpenStackApiException,
+ self._snapshot_server, server, "snapshot-1"
+ )
+ self.assertEqual(409, excep.response.status_code)
diff --git a/nova/tests/functional/regressions/test_bug_1983753.py b/nova/tests/functional/regressions/test_bug_1983753.py
new file mode 100644
index 0000000000..3658d6aeb8
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1983753.py
@@ -0,0 +1,177 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import fixtures
+
+from oslo_serialization import jsonutils
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_pci_sriov_servers
+
+
+class TestPciResize(test_pci_sriov_servers._PCIServersTestBase):
+ # these tests use multiple different configs so the whitelist is set by
+ # each testcase individually
+ PCI_DEVICE_SPEC = []
+ PCI_ALIAS = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-pci-dev",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "device_type": "type-PF",
+ "name": "a-pf",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "device_type": "type-VF",
+ "name": "a-vf",
+ },
+ ]
+ ]
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.virt.libvirt.driver.LibvirtDriver.'
+ 'migrate_disk_and_power_off',
+ return_value='{}'
+ )
+ )
+ # These tests should not depend on the host's sysfs
+ self.useFixture(
+ fixtures.MockPatch('nova.pci.utils.is_physical_function'))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False)
+ )
+ )
+
+ def _test_resize_from_two_devs_to_one_dev(self, num_pci_on_dest):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI in slot 0, 1
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the config matches the PCI dev
+ compute1_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ ]
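+        # NOTE: 'passthrough_whitelist' is the legacy spelling of the
+        # '[pci]device_spec' option used in other tests in this series.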
+ self.flags(group='pci', passthrough_whitelist=compute1_device_spec)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+
+ # create a server that requests two PCI devs
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=2, free=0)
+
+        # start another compute with a different number of PCI devs available
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=num_pci_on_dest)
+ # the config matches the PCI dev
+ compute2_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute2_device_spec)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts(
+ "compute2", total=num_pci_on_dest, free=num_pci_on_dest)
+
+ # resize the server to request only one PCI dev instead of the current
+        # two. This should fit on compute2, which has at least one dev
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._resize_server(server, flavor_id=flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ self.assertPCIDeviceCounts(
+ "compute2", total=num_pci_on_dest, free=num_pci_on_dest - 1)
+
+ def test_resize_from_two_devs_to_one_dev_dest_has_two_devs(self):
+ self._test_resize_from_two_devs_to_one_dev(num_pci_on_dest=2)
+
+ def test_resize_from_two_devs_to_one_dev_dest_has_one_dev(self):
+ self._test_resize_from_two_devs_to_one_dev(num_pci_on_dest=1)
+
+ def test_resize_from_vf_to_pf(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with one VF
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=1)
+ # the config matches only the VF
+ compute1_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute1_device_spec)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+
+        # create a server that requests one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+
+ # start another compute with a single PF dev available
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with 1 VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=1)
+ # the config matches the PF dev but not the VF
+ compute2_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute2_device_spec)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=1, free=1)
+
+        # resize the server to request one PF dev instead of the current VF
+        # dev. This should fit on compute2, which has exactly one PF dev.
+ extra_spec = {"pci_passthrough:alias": "a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._resize_server(server, flavor_id=flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assertPCIDeviceCounts("compute2", total=1, free=0)
diff --git a/nova/tests/functional/regressions/test_bug_1995153.py b/nova/tests/functional/regressions/test_bug_1995153.py
new file mode 100644
index 0000000000..f4e61d06df
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1995153.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2023 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+from unittest import mock
+
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from nova.objects import fields
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class Bug1995153RegressionTest(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+
+ ALIAS_NAME = 'a1'
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ }
+ )]
+    # We set the numa_affinity policy to 'required' to ensure that strict
+    # affinity between PCI devices and the guest CPU and memory is enforced.
+ PCI_ALIAS = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': ALIAS_NAME,
+ 'device_type': fields.PciDeviceType.STANDARD,
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ )]
+
+ def setUp(self):
+ super(Bug1995153RegressionTest, self).setUp()
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
+ host_manager = self.scheduler.manager.host_manager
+ pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
+ host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
+ self.mock_filter = self.useFixture(fixtures.MockPatch(
+ 'nova.scheduler.filters.pci_passthrough_filter'
+ '.PciPassthroughFilter.host_passes',
+ side_effect=host_pass_mock)).mock
+
+ def test_socket_policy_bug_1995153(self):
+ """Previously, the numa_usage_from_instance_numa() method in
+        hardware.py saved the host NUMATopology object with NUMACells that have
+ no `socket` set. This was an omission in the original implementation of
+ the `socket` PCI NUMA affinity policy. The consequence was that any
+ code path that called into numa_usage_from_instance_numa() would
+ clobber the host NUMA topology in the database with a socket-less
+ version. Booting an instance with NUMA toplogy would do that, for
+ example. If then a second instance was booted with the `socket` PCI
+ NUMA affinity policy, it would read the socket-less host NUMATopology
+ from the database, and error out with a NotImplementedError. This was
+ bug 1995153. Demonstrate that this is fixed.
+ """
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
+ kB_mem=(16 * units.Gi) // units.Ki)
+ self.flags(cpu_dedicated_set='0-3', group='compute')
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
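+        # The PCI device is placed on NUMA node 1 while the dedicated CPU
+        # set presumably sits on node 0, so the 'socket' policy is genuinely
+        # exercised.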
+
+ self.start_compute(host_info=host_info, pci_info=pci_info)
+
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
+ 'hw:pci_numa_affinity_policy': 'socket'
+ }
+ # Boot a first instance with a guest NUMA topology to run the
+ # numa_usage_from_instance_numa() and update the host NUMATopology in
+ # the database.
+ self._create_server(
+ flavor_id=self._create_flavor(
+ extra_spec={'hw:cpu_policy': 'dedicated'}))
+
+ # Boot an instance with the `socket` PCI NUMA affinity policy and
+ # assert that it boots correctly now.
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+ self.assertTrue(self.mock_filter.called)
diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py
index 8dfb345578..1ffa3ada92 100644
--- a/nova/tests/functional/test_aggregates.py
+++ b/nova/tests/functional/test_aggregates.py
@@ -935,11 +935,11 @@ class TestAggregateMultiTenancyIsolationFilter(
# Start nova services.
self.start_service('conductor')
- self.admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
- self.api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1',
- project_id=uuids.non_admin)).api
+ api_fixture = self.useFixture(
+ nova_fixtures.OSAPIFixture(api_version='v2.1'))
+ self.admin_api = api_fixture.admin_api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.non_admin
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
@@ -1037,15 +1037,15 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- # Intentionally keep these separate since we want to create the
- # server with the non-admin user in a different project.
- admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ # Intentionally define different project id for the two client since
+ # we want to create the server with the non-admin user in a different
+ # project.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1', project_id=uuids.admin_project))
- self.admin_api = admin_api_fixture.admin_api
+ self.admin_api = api_fixture.admin_api
self.admin_api.microversion = 'latest'
- user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1', project_id=uuids.user_project))
- self.api = user_api_fixture.api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.user_project
self.api.microversion = 'latest'
self.start_service('conductor')
diff --git a/nova/tests/functional/test_availability_zones.py b/nova/tests/functional/test_availability_zones.py
index 991f86148d..c376423303 100644
--- a/nova/tests/functional/test_availability_zones.py
+++ b/nova/tests/functional/test_availability_zones.py
@@ -10,12 +10,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from nova.api.openstack.compute import hosts
+from nova.compute import instance_actions
from nova import context
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client as api_client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.tests.unit.api.openstack import fakes
class TestAvailabilityZoneScheduling(
@@ -36,6 +40,9 @@ class TestAvailabilityZoneScheduling(
self.api = api_fixture.admin_api
self.api.microversion = 'latest'
+ self.controller = hosts.HostController()
+ self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
+
self.start_service('conductor')
self.start_service('scheduler')
@@ -68,18 +75,18 @@ class TestAvailabilityZoneScheduling(
self.api.api_post(
'/os-aggregates/%s/action' % aggregate['id'], add_host_body)
- def _create_server(self, name):
+ def _create_server(self, name, zone=None):
# Create a server, it doesn't matter which host it ends up in.
server = super(TestAvailabilityZoneScheduling, self)._create_server(
flavor_id=self.flavor1,
- networks='none',)
- original_host = server['OS-EXT-SRV-ATTR:host']
- # Assert the server has the AZ set (not None or 'nova').
- expected_zone = 'zone1' if original_host == 'host1' else 'zone2'
- self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])
+ networks='none',
+ az=zone,
+ )
return server
- def _assert_instance_az(self, server, expected_zone):
+ def _assert_instance_az_and_host(
+ self, server, expected_zone, expected_host=None):
+ # Check AZ
# Check the API.
self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])
# Check the DB.
@@ -88,6 +95,51 @@ class TestAvailabilityZoneScheduling(
ctxt, self.cell_mappings[test.CELL1_NAME]) as cctxt:
instance = objects.Instance.get_by_uuid(cctxt, server['id'])
self.assertEqual(expected_zone, instance.availability_zone)
+ # Check host
+ if expected_host:
+ self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
+
+ def _assert_request_spec_az(self, ctxt, server, az):
+ request_spec = objects.RequestSpec.get_by_instance_uuid(
+ ctxt, server['id'])
+ self.assertEqual(request_spec.availability_zone, az)
+
+ def _assert_server_with_az_unshelved_to_specified_az(self, server, az):
+ """Ensure a server with an az constraints is unshelved in the
+ corresponding az.
+ """
+ host_to_disable = 'host1' if az == 'zone1' else 'host2'
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ compute_service_id = self.api.get_services(
+ host=host_to_disable, binary='nova-compute')[0]['id']
+ self.api.put_service(compute_service_id, {'status': 'disabled'})
+
+ req = {
+ 'unshelve': None
+ }
+
+ self.api.post_server_action(server['id'], req)
+
+ server = self._wait_for_action_fail_completion(
+ server, instance_actions.UNSHELVE, 'schedule_instances')
+ self.assertIn('Error', server['result'])
+ self.assertIn('No valid host', server['details'])
+
+ def _shelve_unshelve_server(self, ctxt, server, req):
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+
+ self.api.post_server_action(server['id'], req)
+ server = self._wait_for_server_parameter(
+ server,
+ {'status': 'ACTIVE', },
+ )
+ return self.api.get_server(server['id'])
+
+ def other_az_than(self, az):
+ return 'zone2' if az == 'zone1' else 'zone1'
+
+ def other_host_than(self, host):
+ return 'host2' if host == 'host1' else 'host1'
def test_live_migrate_implicit_az(self):
"""Tests live migration of an instance with an implicit AZ.
@@ -111,7 +163,8 @@ class TestAvailabilityZoneScheduling(
still not restricted to its current zone even if it says it is in one.
"""
server = self._create_server('test_live_migrate_implicit_az')
- original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ expected_zone = self.other_az_than(original_az)
# Attempt to live migrate the instance; again, we don't specify a host
# because there are only two hosts so the scheduler would only be able
@@ -132,8 +185,379 @@ class TestAvailabilityZoneScheduling(
# the database because the API will return the AZ from the host
# aggregate if instance.host is not None.
server = self.api.get_server(server['id'])
- expected_zone = 'zone2' if original_host == 'host1' else 'zone1'
- self._assert_instance_az(server, expected_zone)
+ self._assert_instance_az_and_host(server, expected_zone)
+
+ def test_create_server(self):
+ """Create a server without an AZ constraint and make sure asking a new
+ request spec will not have the request_spec.availability_zone set.
+ """
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ self._assert_request_spec_az(ctxt, server, None)
+
+ def test_create_server_to_zone(self):
+ """Create a server with an AZ constraint and make sure asking a new
+ request spec will have the request_spec.availability_zone to the
+ required zone.
+ """
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone2')
+
+ server = self.api.get_server(server['id'])
+ self._assert_instance_az_and_host(server, 'zone2')
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+
+ def test_cold_migrate_cross_az(self):
+ """Test a cold migration cross AZ.
+ """
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ expected_host = self.other_host_than(original_host)
+ expected_zone = self.other_az_than(original_az)
+
+ self._migrate_server(server)
+ self._confirm_resize(server)
+
+ server = self.api.get_server(server['id'])
+ self._assert_instance_az_and_host(server, expected_zone, expected_host)
+
+# The next tests check the following behavior
+# +----------+---------------------------+-------+----------------------------+
+# | Boot | Unshelve after offload AZ | Host | Result |
+# +==========+===========================+=======+============================+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+#
+# (1) Check at the api and return an error.
+#
+#
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+
+    def test_unshelve_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+
+ req = {
+ 'unshelve': None
+ }
+
+ self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, None)
+
+    def test_unshelve_unpin_az_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+
+ req = {
+ 'unshelve': {'availability_zone': None}
+ }
+
+ self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+    def test_unshelve_to_host_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = self.other_host_than(original_host)
+ expected_zone = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {'host': dest_hostname}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, expected_zone, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, None)
+
+    def test_unshelve_to_host_and_unpin_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = self.other_host_than(original_host)
+ expected_zone = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {
+ 'host': dest_hostname,
+ 'availability_zone': None,
+ }
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, expected_zone, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_az_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+        dest_hostname = self.other_host_than(original_host)
+ dest_az = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {'availability_zone': dest_az}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, dest_az, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, dest_az)
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, dest_az)
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# |          |                           |       | or (1). Schedule to        |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_az_and_host_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+        dest_hostname = self.other_host_than(original_host)
+ dest_az = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {'host': dest_hostname, 'availability_zone': dest_az}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, dest_az, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, dest_az)
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, dest_az)
+
+ def test_unshelve_to_wrong_az_and_host_server_without_az_constraint(self):
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+        dest_hostname = self.other_host_than(original_host)
+
+ req = {
+ 'unshelve': {'host': dest_hostname,
+ 'availability_zone': original_az}
+ }
+
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'],
+ req
+ )
+
+ self.assertEqual(409, exc.response.status_code)
+ self.assertIn(
+ 'Host \\\"{}\\\" is not in the availability zone \\\"{}\\\".'
+ .format(dest_hostname, original_az),
+ exc.response.text
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+    def test_unshelve_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone2')
+
+ req = {
+ 'unshelve': None
+ }
+
+ self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone2')
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_unpin_az_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone2')
+
+ req = {
+ 'unshelve': {'availability_zone': None}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# |          |                           |       | reject the request (1)     |
+# +----------+---------------------------+-------+----------------------------+
+    def test_unshelve_to_host_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host1'}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone1', 'host1')
+ self._assert_request_spec_az(ctxt, server, 'zone1')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone1')
+
+    def test_unshelve_to_host_wrong_az_server_with_az_constraint(self):
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2'}
+ }
+
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'],
+ req
+ )
+
+ self.assertEqual(409, exc.response.status_code)
+ self.assertIn(
+ 'Host \\\"host2\\\" is not in the availability '
+ 'zone \\\"zone1\\\".',
+ exc.response.text
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+    def test_unshelve_to_host_and_unpin_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2',
+ 'availability_zone': None,
+ }
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone2', 'host2')
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_az_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'availability_zone': 'zone2'}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone2', 'host2')
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone2')
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# |          |                           |       | otherwise reject (1)       |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_host_and_az_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2',
+ 'availability_zone': 'zone2',
+ }
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone2', 'host2')
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone2')
+
+ def test_unshelve_to_host_and_wrong_az_a_server_with_az_constraint(self):
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2',
+ 'availability_zone': 'zone1',
+ }
+ }
+
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'],
+ req
+ )
+
+ self.assertEqual(409, exc.response.status_code)
+ self.assertIn(
+ 'Host \\\"host2\\\" is not in the availability '
+ 'zone \\\"zone1\\\".',
+            exc.response.text
+        )
def test_resize_revert_across_azs(self):
"""Creates two compute service hosts in separate AZs. Creates a server
@@ -152,9 +576,9 @@ class TestAvailabilityZoneScheduling(
# Now the server should be in the other AZ.
new_zone = 'zone2' if original_host == 'host1' else 'zone1'
- self._assert_instance_az(server, new_zone)
+ self._assert_instance_az_and_host(server, new_zone)
# Revert the resize and the server should be back in the original AZ.
self.api.post_server_action(server['id'], {'revertResize': None})
server = self._wait_for_state_change(server, 'ACTIVE')
- self._assert_instance_az(server, original_az)
+ self._assert_instance_az_and_host(server, original_az)
diff --git a/nova/tests/functional/test_boot_from_volume.py b/nova/tests/functional/test_boot_from_volume.py
index 45555b002d..6396954bf4 100644
--- a/nova/tests/functional/test_boot_from_volume.py
+++ b/nova/tests/functional/test_boot_from_volume.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+import fixtures
from nova import context
from nova import objects
@@ -50,6 +51,9 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
self.flags(allow_resize_to_same_host=True)
super(BootFromVolumeTest, self).setUp()
self.admin_api = self.api_fixture.admin_api
+ self.useFixture(nova_fixtures.CinderFixture(self))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeVirtAPI.wait_for_instance_event'))
def test_boot_from_volume_larger_than_local_gb(self):
# Verify no local disk is being used currently
@@ -138,6 +142,42 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
post_data = {'rebuild': {'imageRef': image_uuid}}
self.api.post_server_action(server_id, post_data)
+
+ def test_rebuild_volume_backed_larger_than_local_gb(self):
+ # Verify no local disk is being used currently
+ self._verify_zero_local_gb_used()
+
+        # Create a flavor with a disk larger than the available local disk
+ flavor_id = self._create_flavor(memory_mb=64, vcpu=1, disk=8192,
+ ephemeral=0)
+
+ # Boot a server with a flavor disk larger than the available local
+ # disk. It should succeed for boot from volume.
+ server = self._build_server(image_uuid='', flavor_id=flavor_id)
+ volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ bdm = {'boot_index': 0,
+ 'uuid': volume_uuid,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'}
+ server['block_device_mapping_v2'] = [bdm]
+ created_server = self.api.post_server({"server": server})
+ server_id = created_server['id']
+ self._wait_for_state_change(created_server, 'ACTIVE')
+
+ # Check that hypervisor local disk reporting is still 0
+ self._verify_zero_local_gb_used()
+ # Check that instance has not been saved with 0 root_gb
+ self._verify_instance_flavor_not_zero(server_id)
+ # Check that request spec has not been saved with 0 root_gb
+ self._verify_request_spec_flavor_not_zero(server_id)
+
+ # Rebuild
+ # The image_uuid is from CinderFixture for the
+ # volume representing IMAGE_BACKED_VOL.
+ self.api.microversion = '2.93'
+ image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+ post_data = {'rebuild': {'imageRef': image_uuid}}
+ self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
diff --git a/nova/tests/functional/test_cold_migrate.py b/nova/tests/functional/test_cold_migrate.py
index e07820ba2a..b78db14a14 100644
--- a/nova/tests/functional/test_cold_migrate.py
+++ b/nova/tests/functional/test_cold_migrate.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.compute import api as compute_api
from nova import context as nova_context
diff --git a/nova/tests/functional/test_compute_mgr.py b/nova/tests/functional/test_compute_mgr.py
index 38b7f9d7a6..d8892843b4 100644
--- a/nova/tests/functional/test_compute_mgr.py
+++ b/nova/tests/functional/test_compute_mgr.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import context
from nova.network import model as network_model
diff --git a/nova/tests/functional/test_cross_cell_migrate.py b/nova/tests/functional/test_cross_cell_migrate.py
index a1186ca7a5..92cf805945 100644
--- a/nova/tests/functional/test_cross_cell_migrate.py
+++ b/nova/tests/functional/test_cross_cell_migrate.py
@@ -11,7 +11,7 @@
# under the License.
import datetime
-import mock
+from unittest import mock
from oslo_db import exception as oslo_db_exc
from oslo_utils import fixture as osloutils_fixture
diff --git a/nova/tests/functional/test_ephemeral_encryption.py b/nova/tests/functional/test_ephemeral_encryption.py
new file mode 100644
index 0000000000..ba5e411902
--- /dev/null
+++ b/nova/tests/functional/test_ephemeral_encryption.py
@@ -0,0 +1,381 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils.fixture import uuidsentinel
+
+from nova import context
+from nova import objects
+from nova.tests.functional import integrated_helpers
+
+
+class _TestEphemeralEncryptionBase(
+ integrated_helpers.ProviderUsageBaseTestCase
+):
+    # NOTE(lyarwood): A dict of test flavors defined per test class,
+    # keyed by flavor name, each value being a dict containing an 'id'
+    # and an optional 'extra_specs' dict. For example:
+    # {
+    #     'name': {
+    #         'id': uuidsentinel.flavor_id,
+    #         'extra_specs': {
+    #             'hw:foo': 'bar'
+    #         }
+    #     }
+    # }
+ flavors = {}
+
+ def setUp(self):
+ super().setUp()
+
+ self.ctxt = context.get_admin_context()
+
+ # Create the required test flavors
+ for name, details in self.flavors.items():
+ flavor = self.admin_api.post_flavor({
+ 'flavor': {
+ 'name': name,
+ 'id': details['id'],
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1024,
+ }
+ })
+ # Add the optional extra_specs
+ if details.get('extra_specs'):
+ self.admin_api.post_extra_spec(
+ flavor['id'], {'extra_specs': details['extra_specs']})
+
+ # We only need a single compute for these tests
+ self._start_compute(host='compute1')
+
+ def _assert_ephemeral_encryption_enabled(
+ self, server_id, encryption_format=None):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.ctxt, server_id)
+ for bdm in bdms:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ if encryption_format:
+ self.assertEqual(
+ encryption_format, bdm.encryption_format)
+
+ def _assert_ephemeral_encryption_disabled(self, server_id):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.ctxt, server_id)
+ for bdm in bdms:
+ if bdm.is_local:
+ self.assertFalse(bdm.encrypted)
+
+
+class TestEphemeralEncryptionAvailable(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.EphEncryptionDriver'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ }
+
+ def test_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_flavor_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_and_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_flavor_and_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_requested_and_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_flavor_disabled_and_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+
+class TestEphemeralEncryptionUnavailable(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.MediumFakeDriver'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ }
+
+ def test_requested_but_unavailable(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_disabled(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ flavor_id=uuidsentinel.no_eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+
+class TestEphemeralEncryptionLUKS(TestEphemeralEncryptionAvailable):
+
+ compute_driver = 'fake.EphEncryptionDriverLUKS'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ 'eph_encryption_luks': {
+ 'id': uuidsentinel.eph_encryption_luks_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }
+ },
+ 'eph_encryption_plain': {
+ 'id': uuidsentinel.eph_encryption_plain_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'plain'
+ }
+        },
+    }
+
+ def test_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_flavor_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_flavor_and_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_flavor_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_requested_luks_flavor_requested_plain(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_image_requested_plain_flavor_requested_luks(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+
+class TestEphemeralEncryptionPLAIN(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.EphEncryptionDriverPLAIN'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ 'eph_encryption_luks': {
+ 'id': uuidsentinel.eph_encryption_luks_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }
+ },
+ 'eph_encryption_plain': {
+ 'id': uuidsentinel.eph_encryption_plain_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'plain'
+ }
+ },
+ }
+
+ def test_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_flavor_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_flavor_and_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_flavor_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_requested_plain_flavor_requested_luks(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_image_requested_luks_flavor_requested_plain(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
diff --git a/nova/tests/functional/test_images.py b/nova/tests/functional/test_images.py
index 340e883da9..e7e9f2a6c9 100644
--- a/nova/tests/functional/test_images.py
+++ b/nova/tests/functional/test_images.py
@@ -12,7 +12,6 @@
from oslo_utils.fixture import uuidsentinel as uuids
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -70,10 +69,9 @@ class ImagesTest(integrated_helpers._IntegratedTestBase):
server = self.api.post_server({"server": server})
server = self._wait_for_state_change(server, 'ACTIVE')
- # Create an admin API fixture with a unique project ID.
- admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(
- project_id=uuids.admin_project)).admin_api
+    # Use an admin API with a unique project ID.
+ admin_api = self.api_fixture.alternative_admin_api
+ admin_api.project_id = uuids.admin_project
# Create a snapshot of the server using the admin project.
name = 'admin-created-snapshot'
diff --git a/nova/tests/functional/test_instance_actions.py b/nova/tests/functional/test_instance_actions.py
index 054def5183..060133ce93 100644
--- a/nova/tests/functional/test_instance_actions.py
+++ b/nova/tests/functional/test_instance_actions.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_policy import policy as oslo_policy
from nova import exception
@@ -58,6 +59,15 @@ class InstanceActionsTestV221(InstanceActionsTestV21):
self.assertEqual('delete', actions[0]['action'])
self.assertEqual('create', actions[1]['action'])
+ def test_get_instance_actions_shelve_deleted(self):
+ server = self._create_server()
+ self._shelve_server(server)
+ self._delete_server(server)
+ actions = self.api.get_instance_actions(server['id'])
+ self.assertEqual('delete', actions[0]['action'])
+ self.assertEqual('shelve', actions[1]['action'])
+ self.assertEqual('create', actions[2]['action'])
+
class HypervisorError(Exception):
"""This is just used to make sure the exception type is in the events."""
diff --git a/nova/tests/functional/test_ip_allocation.py b/nova/tests/functional/test_ip_allocation.py
new file mode 100644
index 0000000000..a899641abe
--- /dev/null
+++ b/nova/tests/functional/test_ip_allocation.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.functional import integrated_helpers
+
+
+class IPAllocationTests(integrated_helpers._IntegratedTestBase):
+ """Test behavior with various IP allocation policies.
+
+ This mainly exists to test the 'deferred' and 'none' policies.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = 'latest'
+ ADMIN_API = True
+
+ def setUp(self):
+ super().setUp()
+
+        # Add a port with an ip_allocation of 'none'
+ port = {
+ 'name': '',
+ 'description': '',
+ 'network_id': self.neutron.network_1['id'],
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'mac_address': 'ee:94:88:57:d5:7a',
+            # The ip_allocation is 'none', so fixed_ips must be empty
+ 'fixed_ips': [],
+ 'tenant_id': self.neutron.tenant_id,
+ 'project_id': self.neutron.tenant_id,
+ 'device_id': '',
+ 'binding:profile': {},
+ 'binding:vnic_type': 'normal',
+ 'binding:vif_type': 'ovs',
+ 'binding:vif_details': {},
+ 'ip_allocation': 'none',
+ }
+ created_port = self.neutron.create_port({'port': port})
+ self.port_id = created_port['port']['id']
+
+ def test_boot_with_none_policy(self):
+ """Create a port with the 'none' policy."""
+ self._create_server(
+ networks=[{'port': self.port_id}])
diff --git a/nova/tests/functional/test_monkey_patch.py b/nova/tests/functional/test_monkey_patch.py
deleted file mode 100644
index b471d333cf..0000000000
--- a/nova/tests/functional/test_monkey_patch.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2020 Red Hat, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(artom) This file exists to test eventlet monkeypatching. How and what
-# eventlet monkeypatches can be controlled by environment variables that
-# are processed by eventlet at import-time (for exmaple, EVENTLET_NO_GREENDNS).
-# Nova manages all of this in nova.monkey_patch. Therefore, nova.monkey_patch
-# must be the first thing to import eventlet. As nova.tests.functional.__init__
-# imports nova.monkey_patch, we're OK here.
-
-import socket
-import traceback
-
-from nova import test
-
-
-class TestMonkeyPatch(test.TestCase):
-
- def test_greendns_is_disabled(self):
- """Try to resolve a fake fqdn. If we see greendns mentioned in the
- traceback of the raised exception, it means we've not actually disabled
- greendns. See the TODO and NOTE in nova.monkey_patch to understand why
- greendns needs to be disabled.
- """
- raised = False
- try:
- socket.gethostbyname('goat.fake')
- except Exception:
- tb = traceback.format_exc()
- # NOTE(artom) If we've correctly disabled greendns, we expect the
- # traceback to not contain any reference to it.
- self.assertNotIn('greendns.py', tb)
- raised = True
- self.assertTrue(raised)
diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py
index b6fddc84b2..888b43cea0 100644
--- a/nova/tests/functional/test_nova_manage.py
+++ b/nova/tests/functional/test_nova_manage.py
@@ -15,9 +15,9 @@ import collections
import datetime
from io import StringIO
import os.path
+from unittest import mock
import fixtures
-import mock
from neutronclient.common import exceptions as neutron_client_exc
import os_resource_classes as orc
from oslo_serialization import jsonutils
diff --git a/nova/tests/functional/test_policy.py b/nova/tests/functional/test_policy.py
index 28339bdd0f..bb72915336 100644
--- a/nova/tests/functional/test_policy.py
+++ b/nova/tests/functional/test_policy.py
@@ -60,7 +60,7 @@ class HostStatusPolicyTestCase(test.TestCase,
overwrite=False)
# Create a server as a normal non-admin user.
# In microversion 2.36 the /images proxy API was deprecated, so
- # specifiy the image_uuid directly.
+ # specify the image_uuid directly.
kwargs = {'image_uuid': self.image_uuid}
if networks:
# Starting with microversion 2.37 the networks field is required.
diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py
index 7f9e3bbb79..a5da9f87b4 100644
--- a/nova/tests/functional/test_report_client.py
+++ b/nova/tests/functional/test_report_client.py
@@ -12,13 +12,14 @@
# under the License.
import copy
+from unittest import mock
+
import ddt
from keystoneauth1 import exceptions as kse
-import mock
+import microversion_parse
import os_resource_classes as orc
import os_traits as ot
from oslo_utils.fixture import uuidsentinel as uuids
-import pkg_resources
from nova.cmd import status
from nova.compute import provider_tree
@@ -39,9 +40,6 @@ from nova.tests.functional import fixtures as func_fixtures
CONF = conf.CONF
-CMD_STATUS_MIN_MICROVERSION = pkg_resources.parse_version(
- status.MIN_PLACEMENT_MICROVERSION)
-
class VersionCheckingReportClient(report.SchedulerReportClient):
"""This wrapper around SchedulerReportClient checks microversions for
@@ -57,14 +55,18 @@ class VersionCheckingReportClient(report.SchedulerReportClient):
if not microversion:
return
- seen_microversion = pkg_resources.parse_version(microversion)
- if seen_microversion > CMD_STATUS_MIN_MICROVERSION:
+ min_microversion = microversion_parse.parse_version_string(
+ status.MIN_PLACEMENT_MICROVERSION)
+ got_microversion = microversion_parse.parse_version_string(
+ microversion)
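+        # parse_version_string() returns comparable version objects, so
+        # e.g. '1.30' correctly compares greater than '1.4', unlike a
+        # plain string comparison.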
+ if got_microversion > min_microversion:
raise ValueError(
"Report client is using microversion %s, but nova.cmd.status "
"is only requiring %s. See "
"I4369f7fb1453e896864222fa407437982be8f6b5 for an example of "
"how to bump the minimum requirement." %
- (microversion, status.MIN_PLACEMENT_MICROVERSION))
+ (got_microversion, min_microversion)
+ )
def get(self, *args, **kwargs):
self._check_microversion(kwargs)
@@ -1361,6 +1363,17 @@ class SchedulerReportClientTests(test.TestCase):
resp = self.client._reshape(self.context, inventories, allocs)
self.assertEqual(204, resp.status_code)
+        # Trigger a generation conflict. We can do this by simply sending
+        # back the same reshape: it will fail because the previous reshape
+        # already updated the provider generations.
+ self.assertRaises(
+ exception.PlacementReshapeConflict,
+ self.client._reshape,
+ self.context,
+ inventories,
+ allocs,
+ )
+
def test_update_from_provider_tree_reshape(self):
"""Run update_from_provider_tree with reshaping."""
exp_ptree = self._set_up_provider_tree()
@@ -1517,3 +1530,44 @@ class SchedulerReportClientTests(test.TestCase):
self.context, self.compute_name)
self.assertProviderTree(orig_exp_ptree, ptree)
self.assertAllocations(orig_exp_allocs, allocs)
+
+ def test_update_from_provider_tree_reshape_conflict_retry(self):
+ exp_ptree = self._set_up_provider_tree()
+
+ ptree = self.client.get_provider_tree_and_ensure_root(
+ self.context, self.compute_uuid)
+ allocs = self.client.get_allocations_for_provider_tree(
+ self.context, self.compute_name)
+ self.assertProviderTree(exp_ptree, ptree)
+ self.assertAllocations({}, allocs)
+
+ exp_allocs = self._set_up_provider_tree_allocs()
+
+        # Prepare inventory and allocation changes to trigger a reshape.
+ for rp_uuid in ptree.get_provider_uuids():
+ # Add a new resource class to the inventories
+ ptree.update_inventory(
+ rp_uuid, dict(ptree.data(rp_uuid).inventory,
+ CUSTOM_FOO={'total': 10}))
+ exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = {'total': 10}
+ for c_uuid, alloc in allocs.items():
+ for rp_uuid, res in alloc['allocations'].items():
+ res['resources']['CUSTOM_FOO'] = 1
+ exp_allocs[c_uuid]['allocations'][rp_uuid][
+ 'resources']['CUSTOM_FOO'] = 1
+
+        # As the inventory update happens in the same request as the
+        # allocation update, the allocation update will hit a generation
+        # conflict. We expect this to be signalled with an exception so
+        # that the upper layer can re-drive the reshape process with a
+        # fresh tree that now has the new inventories.
+ self.assertRaises(
+ exception.PlacementReshapeConflict,
+ self.client.update_from_provider_tree,
+ self.context,
+ ptree,
+ allocations=allocs,
+ )
+        # We also expect the internal cache to be cleared so that the
+        # re-drive has a chance to load fresh data from placement.
+ self.assertEqual(0, len(self.client._provider_tree.roots))
diff --git a/nova/tests/functional/test_routed_networks.py b/nova/tests/functional/test_routed_networks.py
index 19c5d3c59f..616780a219 100644
--- a/nova/tests/functional/test_routed_networks.py
+++ b/nova/tests/functional/test_routed_networks.py
@@ -11,7 +11,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/functional/test_server_faults.py b/nova/tests/functional/test_server_faults.py
index 91f813f070..edc3c3b377 100644
--- a/nova/tests/functional/test_server_faults.py
+++ b/nova/tests/functional/test_server_faults.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 08e47b3971..01e3547f7e 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from nova.compute import instance_actions
@@ -64,12 +65,12 @@ class ServerGroupTestBase(test.TestCase,
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
- self.api = api_fixture.api
+ self.api = self.api_fixture.api
self.api.microversion = self.microversion
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.start_service('conductor')
@@ -102,7 +103,10 @@ class ServerGroupFakeDriver(fake.SmallFakeDriver):
"""
vcpus = 1000
- memory_mb = 8192
+    # The test cases were built with a default RAM allocation ratio of 1.5
+    # and 8192 MB of RAM, so to maintain the same capacity with the new
+    # default allocation ratio of 1.0 we use 8192 * 1.5 = 12288.
+ memory_mb = 12288
local_gb = 100000
@@ -174,13 +178,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
# Create an API using project 'openstack1'.
# This is a non-admin API.
- #
- # NOTE(sdague): this is actually very much *not* how this
- # fixture should be used. This actually spawns a whole
- # additional API server. Should be addressed in the future.
- api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version,
- project_id=PROJECT_ID_ALT)).api
+ api_openstack1 = self.api_fixture.alternative_api
+ api_openstack1.project_id = PROJECT_ID_ALT
api_openstack1.microversion = self.microversion
# Create a server group in project 'openstack'
@@ -445,7 +444,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
evacuated_server = self._evacuate_server(
servers[1], {'onSharedStorage': 'False'},
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -622,7 +622,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
compute3 = self.start_service('compute', host='host3')
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -801,7 +802,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@@ -871,6 +873,54 @@ class ServerGroupTestV264(ServerGroupTestV215):
self.assertEqual(2, hosts.count(host))
+class ServerGroupTestV295(ServerGroupTestV264):
+ microversion = '2.95'
+
+ def _evacuate_with_soft_anti_affinity_policies(self, group):
+ created_group = self.api.post_server_groups(group)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # Note(gibi): need to get the server again as the state of the instance
+ # goes to ACTIVE first then the host of the instance changes to the
+ # new host later
+ evacuated_server = self.admin_api.get_server(evacuated_server['id'])
+
+ return [evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host']]
+
+ def test_evacuate_with_anti_affinity(self):
+ created_group = self.api.post_server_groups(self.anti_affinity)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ # Start additional host to test evacuation
+ compute3 = self.start_service('compute', host='host3')
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # check that the server is evacuated
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # check that policy is kept
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host'])
+
+ compute3.kill()
+
+
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index e77d4bf1ea..5887c99081 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -17,10 +17,11 @@ import collections
import copy
import datetime
import time
+from unittest import mock
import zlib
+from cinderclient import exceptions as cinder_exception
from keystoneauth1 import adapter
-import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import base64
@@ -764,7 +765,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
LOG.info('Attaching volume %s to server %s', volume_id, server_id)
# The fake driver doesn't implement get_device_name_for_instance, so
- # we'll just raise the exception directly here, instead of simuluating
+ # we'll just raise the exception directly here, instead of simulating
# an instance with 26 disk devices already attached.
with mock.patch.object(self.compute.driver,
'get_device_name_for_instance') as mock_get:
@@ -1253,9 +1254,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
def test_get_servers_detail_filters(self):
# We get the results only from the up cells, this ignoring the down
# cells if list_records_by_skipping_down_cells config option is True.
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(
search_opts={'hostname': "cell3-inst0"})
@@ -1263,9 +1262,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
self.assertEqual(self.up_cell_insts[2], servers[0]['id'])
def test_get_servers_detail_all_tenants_with_down_cells(self):
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(search_opts={'all_tenants': True})
# 4 servers from the up cells and 4 servers from the down cells
@@ -1518,15 +1515,97 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase):
'volume-backed server', str(resp))
+class ServerRebuildTestCaseV293(integrated_helpers._IntegratedTestBase):
+ api_major_version = 'v2.1'
+
+ def setUp(self):
+ super(ServerRebuildTestCaseV293, self).setUp()
+ self.cinder = nova_fixtures.CinderFixture(self)
+ self.useFixture(self.cinder)
+
+ def _bfv_server(self):
+ server_req_body = {
+ # There is no imageRef because this is boot from volume.
+ 'server': {
+                'flavorRef': '1',  # m1.tiny from DefaultFlavorsFixture
+ 'name': 'test_volume_backed_rebuild_different_image',
+ 'networks': [],
+ 'block_device_mapping_v2': [{
+ 'boot_index': 0,
+ 'uuid':
+ nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ }]
+ }
+ }
+ server = self.api.post_server(server_req_body)
+ return self._wait_for_state_change(server, 'ACTIVE')
+
+ def _test_rebuild(self, server):
+ self.api.microversion = '2.93'
+        # Now rebuild the server with a different image from the one used
+        # to create our fake volume.
+ rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
+ rebuild_req_body = {'rebuild': {'imageRef': rebuild_image_ref}}
+
+ with mock.patch.object(self.compute.manager.virtapi,
+ 'wait_for_instance_event'):
+ self.api.api_post('/servers/%s/action' % server['id'],
+ rebuild_req_body,
+ check_response_status=[202])
+
+ def test_volume_backed_rebuild_root_v293(self):
+ server = self._bfv_server()
+ self._test_rebuild(server)
+
+ def test_volume_backed_rebuild_root_create_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_create',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_instance_deleted(self):
+ server = self._bfv_server()
+ error = exception.InstanceNotFound(instance_id=server['id'])
+ with mock.patch.object(self.compute.manager, '_detach_root_volume',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_delete_old_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_delete',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+
class ServersTestV280(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
def setUp(self):
super(ServersTestV280, self).setUp()
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.api = api_fixture.api
- self.admin_api = api_fixture.admin_api
+ self.api = self.api_fixture.api
+ self.admin_api = self.api_fixture.admin_api
self.api.microversion = '2.80'
self.admin_api.microversion = '2.80'
@@ -1585,9 +1664,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase):
project_id_1 = '4906260553374bf0a5d566543b320516'
project_id_2 = 'c850298c1b6b4796a8f197ac310b2469'
- new_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version, project_id=project_id_1))
- new_admin_api = new_api_fixture.admin_api
+ new_admin_api = self.api_fixture.alternative_admin_api
+ new_admin_api.project_id = project_id_1
new_admin_api.microversion = '2.80'
post = {
@@ -2182,7 +2260,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
}
server = self._evacuate_server(
- server, extra_post_args=post, expected_host=dest_hostname)
+ server, extra_post_args=post, expected_host=dest_hostname,
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2359,7 +2438,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
# stay ACTIVE and task_state will be set to None.
server = self._evacuate_server(
server, expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2519,6 +2599,57 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
self._delete_and_check_allocations(server)
+ def test_shelve_unshelve_to_host(self):
+ source_hostname = self.compute1.host
+ dest_hostname = self.compute2.host
+ source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+        dest_rp_uuid = self._get_provider_uuid_by_host(dest_hostname)
+
+ server = self._boot_then_shelve_and_check_allocations(
+ source_hostname, source_rp_uuid)
+
+ self._shelve_offload_and_check_allocations(server, source_rp_uuid)
+
+ req = {
+ 'unshelve': {'host': dest_hostname}
+ }
+
+ self.api.post_server_action(server['id'], req)
+ self._wait_for_server_parameter(
+ server, {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'}
+ )
+
+ self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
+
+ # the server has an allocation on only the dest node
+ self.assertFlavorMatchesAllocation(
+ self.flavor1, server['id'], dest_rp_uuid)
+
+ self._delete_and_check_allocations(server)
+
+ def test_shelve_unshelve_to_host_instance_not_offloaded(self):
+ source_hostname = self.compute1.host
+ dest_hostname = self.compute2.host
+ source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+
+ server = self._boot_then_shelve_and_check_allocations(
+ source_hostname, source_rp_uuid)
+
+ req = {
+ 'unshelve': {'host': dest_hostname}
+ }
+
+ ex = self.assertRaises(
+ client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'], req
+ )
+ self.assertEqual(409, ex.response.status_code)
+ self.assertIn(
+ "The server status must be SHELVED_OFFLOADED",
+ ex.response.text)
+
def _shelve_offload_and_check_allocations(self, server, source_rp_uuid):
req = {
'shelveOffload': {}
@@ -5195,7 +5326,8 @@ class ServerMovingTestsWithNestedResourceRequests(
server = self._evacuate_server(
server, extra_post_args=post, expected_migration_status='error',
- expected_host=source_hostname)
+ expected_host=source_hostname,
+ expected_state='ACTIVE')
self.assertIn('Unable to move instance %s to host host2. The instance '
'has complex allocations on the source host so move '
@@ -5401,7 +5533,8 @@ class ServerMovingTestsFromFlatToNested(
self._evacuate_server(
server, extra_post_args=post, expected_host='host1',
- expected_migration_status='error')
+ expected_migration_status='error',
+ expected_state='ACTIVE')
# We expect that the evacuation will fail as force evacuate tries to
# blindly copy the source allocation to the destination but on the
@@ -6393,3 +6526,41 @@ class PortAndFlavorAccelsServerCreateTest(AcceleratorServerBase):
binding_profile = neutronapi.get_binding_profile(updated_port)
self.assertNotIn('arq_uuid', binding_profile)
self.assertNotIn('pci_slot', binding_profile)
+
+
+class PortBindingShelvedServerTest(integrated_helpers._IntegratedTestBase):
+ """Tests for servers with ports."""
+
+ compute_driver = 'fake.SmallFakeDriver'
+
+ def setUp(self):
+ super(PortBindingShelvedServerTest, self).setUp()
+ self.flavor_id = self._create_flavor(
+ disk=10, ephemeral=20, swap=5 * 1024)
+
+ def test_shelve_offload_with_port(self):
+ # Do not wait before offloading
+ self.flags(shelved_offload_time=0)
+
+ server = self._create_server(
+ flavor_id=self.flavor_id,
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+        # Assert that the port is actually associated with the instance
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertEqual(port['binding:host_id'], 'compute')
+ self.assertEqual(port['binding:status'], 'ACTIVE')
+
+ # Do shelve
+ server = self._shelve_server(server, 'SHELVED_OFFLOADED')
+
+ # Retrieve the updated port
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+        # Assert that the port is still associated with the instance
+        # but that the binding is no longer on the compute host
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertIsNone(port['binding:host_id'])
+ self.assertNotIn('binding:status', port)
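
Condensed from the assertions above, the Neutron port is expected to change shape as follows across the shelve offload (a sketch of the relevant keys only, assuming the fixture mirrors real Neutron behavior):

    server_id = '00000000-0000-0000-0000-000000000001'  # illustrative

    # Before shelve offload: bound to the source compute.
    port_before = {
        'device_id': server_id,        # owned by the instance
        'binding:host_id': 'compute',  # bound to the host
        'binding:status': 'ACTIVE',
    }
    # After shelve offload: ownership kept, binding dropped.
    port_after = {
        'device_id': server_id,        # still owned by the instance
        'binding:host_id': None,       # no host binding anymore
        # 'binding:status' is absent once the binding is gone
    }
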
diff --git a/nova/tests/functional/test_servers_provider_tree.py b/nova/tests/functional/test_servers_provider_tree.py
index 0eff6c6bda..da562c4f19 100644
--- a/nova/tests/functional/test_servers_provider_tree.py
+++ b/nova/tests/functional/test_servers_provider_tree.py
@@ -14,8 +14,8 @@
# under the License.
-import mock
import os_traits
+from unittest import mock
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
@@ -82,7 +82,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
},
'MEMORY_MB': {
'total': 8192,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'max_unit': 8192,
'min_unit': 1,
'reserved': 512,
@@ -90,7 +90,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
},
'VCPU': {
'total': 10,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'max_unit': 10,
'min_unit': 1,
'reserved': 0,
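
The new expected ratios (1.0 for RAM, 4.0 for VCPU) look like the updated defaults of the initial allocation-ratio options; that mapping is an assumption on my part. A test that wants the previous behavior back could presumably pin the options explicitly in the document's own idiom:

    # Assumed option names (initial_*_allocation_ratio); pin the
    # previous defaults if a test depends on them.
    self.flags(initial_cpu_allocation_ratio=16.0)
    self.flags(initial_ram_allocation_ratio=1.5)
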
diff --git a/nova/tests/functional/test_servers_resource_request.py b/nova/tests/functional/test_servers_resource_request.py
index a8df84a5bc..9c91af7218 100644
--- a/nova/tests/functional/test_servers_resource_request.py
+++ b/nova/tests/functional/test_servers_resource_request.py
@@ -14,9 +14,9 @@
import copy
import logging
+from unittest import mock
from keystoneauth1 import adapter
-import mock
from neutronclient.common import exceptions as neutron_exception
import os_resource_classes as orc
from oslo_config import cfg
@@ -146,12 +146,13 @@ class ExtendedResourceRequestNeutronFixture(ResourceRequestNeutronFixture):
# port_resource_request_groups.py
{
"updated": "2021-08-02T10:00:00-00:00",
- "name": constants.RESOURCE_REQUEST_GROUPS_EXTENSION,
+ "name": "Port Resource Request Groups",
"links": [],
"alias": "port-resource-request-groups",
- "description":
+ "description": (
"Support requesting multiple groups of resources and "
"traits from the same RP subtree in resource_request"
+ ),
}
)
return extensions
@@ -458,7 +459,7 @@ class PortResourceRequestBasedSchedulingTestBase(
def _create_sriov_networking_rp_tree(self, hostname, compute_rp_uuid):
# Create a matching RP tree in placement for the PCI devices added to
- # the passthrough_whitelist config during setUp() and PCI devices
+ # the device_spec config during setUp() and PCI devices
# present in the FakeDriverWithPciResources virt driver.
#
# * PF1 represents the PCI device 0000:01:00, it will be mapped to
@@ -1067,7 +1068,7 @@ class PortResourceRequestBasedSchedulingTest(
def test_interface_attach_sriov_with_qos_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1114,7 +1115,7 @@ class PortResourceRequestBasedSchedulingTest(
):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1361,7 +1362,7 @@ class PortResourceRequestBasedSchedulingTest(
does not have resource request can be allocated to PF2 or PF3.
For the detailed compute host config see the FakeDriverWithPciResources
- class. For the necessary passthrough_whitelist config see the setUp of
+ class. For the necessary device_spec config see the setUp of
the PortResourceRequestBasedSchedulingTestBase class.
"""
@@ -1922,7 +1923,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_migrate_server_with_qos_port_pci_update_fail_not_reschedule(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1942,7 +1943,7 @@ class ServerMoveWithPortResourceRequestTest(
non_qos_port, qos_port, qos_sriov_port)
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name which will
+ # update_pci_request_with_placement_allocations which will
# intentionally not trigger a re-schedule even if there is host3 as an
# alternate.
self.api.post_server_action(server['id'], {'migrate': None})
@@ -2161,7 +2162,8 @@ class ServerMoveWithPortResourceRequestTest(
# simply fail and the server remains on the source host
server = self._evacuate_server(
server, expected_host='host1', expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state="ACTIVE")
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
@@ -2185,7 +2187,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_evacuate_with_qos_port_pci_update_fail(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is evacuated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2206,7 +2208,7 @@ class ServerMoveWithPortResourceRequestTest(
self.compute1_service_id, {'forced_down': 'true'})
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name
+ # update_pci_request_with_placement_allocations
server = self._evacuate_server(
server, expected_host='host1', expected_state='ERROR',
expected_task_state=None, expected_migration_status='failed')
@@ -2362,7 +2364,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_live_migrate_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is live migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2503,7 +2505,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_unshelve_offloaded_server_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is unshelved to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2536,7 +2538,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], {'unshelve': None})
# Unshelve fails on host2 due to
- # update_pci_request_spec_with_allocated_interface_name fails so the
+ # update_pci_request_with_placement_allocations fails so the
# instance goes back to shelve offloaded state
self.notifier.wait_for_versioned_notifications(
'instance.unshelve.start')
@@ -2978,6 +2980,7 @@ class ExtendedResourceRequestOldCompute(
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
+ self.api.microversion = '2.72'
@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+            # Simulate get_local_node_uuid(), which writes the UUID once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Level-set test case to show that starting and re-starting without
+ # any error cases works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
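
These startup checks revolve around a persisted node identity. A minimal sketch of the contract the three mocked nova.virt.node helpers follow (the state-file path is illustrative; the real helpers derive it from configuration):

    import os
    import uuid

    STATE_FILE = '/var/lib/nova/compute_id'  # illustrative path

    def read_local_node_uuid():
        # None on a true first start; the stored UUID afterwards.
        if not os.path.exists(STATE_FILE):
            return None
        with open(STATE_FILE) as f:
            return f.read().strip()

    def get_local_node_uuid():
        # Generate and persist exactly once, which is what the
        # fixture's local_uuid() helper mimics with its single
        # write_local_node_uuid() call.
        node_uuid = read_local_node_uuid()
        if node_uuid is None:
            node_uuid = str(uuid.uuid4())
            with open(STATE_FILE, 'w') as f:
                f.write(node_uuid)
        return node_uuid
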
diff --git a/nova/tests/functional/test_unified_limits.py b/nova/tests/functional/test_unified_limits.py
new file mode 100644
index 0000000000..64d59b47d7
--- /dev/null
+++ b/nova/tests/functional/test_unified_limits.py
@@ -0,0 +1,217 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_limit import fixture as limit_fixture
+from oslo_serialization import base64
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context as nova_context
+from nova.limit import local as local_limit
+from nova.objects import flavor as flavor_obj
+from nova.objects import instance_group as group_obj
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+
+
+class UnifiedLimitsTest(integrated_helpers._IntegratedTestBase):
+
+ def setUp(self):
+ super(UnifiedLimitsTest, self).setUp()
+ # Use different project_ids for non-admin and admin.
+ self.api.project_id = 'fake'
+ self.admin_api.project_id = 'admin'
+
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 10,
+ local_limit.SERVER_GROUP_MEMBERS: 1,
+ 'servers': 4,
+ 'class:VCPU': 8,
+ 'class:MEMORY_MB': 32768,
+ 'class:DISK_GB': 250}
+ projlimits = {self.api.project_id: {'servers': 2,
+ 'class:VCPU': 4,
+ 'class:MEMORY_MB': 16384,
+ 'class:DISK_GB': 100}}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, projlimits))
+ self.ctx = nova_context.get_admin_context()
+
+ def _setup_services(self):
+ # Use driver with lots of resources so we don't get NoValidHost while
+ # testing quotas. Need to do this before services are started.
+ self.flags(compute_driver='fake.FakeDriver')
+ super(UnifiedLimitsTest, self)._setup_services()
+
+ def test_servers(self):
+ # First test the project limit using the non-admin project.
+ for i in range(2):
+ self._create_server(api=self.api)
+
+        # Attempting to create a third server should fail.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('servers', e.response.text)
+
+ # Then test the default limit using the admin project.
+ for i in range(4):
+ self._create_server(api=self.admin_api)
+
+        # Attempting to create a fifth server should fail.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('servers', e.response.text)
+
+ def test_vcpu(self):
+ # First test the project limit using the non-admin project.
+ # m1.large has vcpus=4 and our project limit is 4, should succeed.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.large')
+ self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+ # m1.small has vcpus=1, should fail because we are at quota.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.small')
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api,
+ flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:VCPU', e.response.text)
+
+ # Then test the default limit of 8 using the admin project.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.large')
+ for i in range(2):
+ self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+        # Attempting to create another server with vcpus=1 should fail
+        # because we are at quota.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.small')
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api, flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:VCPU', e.response.text)
+
+ def test_memory_mb(self):
+ # First test the project limit using the non-admin project.
+ flavor = flavor_obj.Flavor(
+ context=self.ctx, memory_mb=16384, vcpus=1, root_gb=1,
+ flavorid='9', name='m1.custom')
+ flavor.create()
+ self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+        # Attempting to create another should fail as we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api,
+ flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:MEMORY_MB', e.response.text)
+
+ # Then test the default limit of 32768 using the admin project.
+ for i in range(2):
+ self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+        # Attempting to create another server should fail; we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api, flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:MEMORY_MB', e.response.text)
+
+ def test_disk_gb(self):
+ # First test the project limit using the non-admin project.
+ flavor = flavor_obj.Flavor(
+ context=self.ctx, memory_mb=1, vcpus=1, root_gb=100,
+ flavorid='9', name='m1.custom')
+ flavor.create()
+ self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+        # Attempting to create another should fail as we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api,
+ flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:DISK_GB', e.response.text)
+
+ # Then test the default limit of 250 using the admin project.
+ for i in range(2):
+ self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+        # Attempting to create another server should fail; we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api, flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:DISK_GB', e.response.text)
+
+ def test_no_injected_files(self):
+ self._create_server()
+
+ def test_max_injected_files(self):
+ # Quota is 5.
+ files = []
+ contents = base64.encode_as_text('some content')
+ for i in range(5):
+ files.append(('/my/path%d' % i, contents))
+ server = self._build_server()
+ personality = [
+ {'path': item[0], 'contents': item[1]} for item in files]
+ server['personality'] = personality
+ self.api.post_server({'server': server})
+
+ def test_max_injected_file_content_bytes(self):
+ # Quota is 10 * 1024
+        # Hm, apparently the quota is checked against the base64-encoded
+        # string even though the api-ref claims the limit applies to the
+        # decoded data. Subtract 3072 characters to account for that.
+ content = base64.encode_as_bytes(
+ ''.join(['a' for i in range(10 * 1024 - 3072)]))
+ server = self._build_server()
+ personality = [{'path': '/test/path', 'contents': content}]
+ server['personality'] = personality
+ self.api.post_server({'server': server})
+
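
The 3072-character headroom follows from base64 inflation: 3 input bytes become 4 output characters, so n bytes encode to 4*ceil(n/3) characters. With the quota applied to the encoded text, 10240 - 3072 = 7168 bytes encode to 9560 characters, safely under the 10240 limit. A quick check:

    import base64

    limit = 10 * 1024
    payload = b'a' * (limit - 3072)
    encoded = base64.b64encode(payload)
    assert len(encoded) == 4 * ((len(payload) + 2) // 3)  # 9560
    assert len(encoded) <= limit
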
+ def test_max_injected_file_path_bytes(self):
+ # Quota is 255.
+ path = ''.join(['a' for i in range(255)])
+ contents = base64.encode_as_text('some content')
+ server = self._build_server()
+ personality = [{'path': path, 'contents': contents}]
+ server['personality'] = personality
+ self.api.post_server({'server': server})
+
+ def test_server_group_members(self):
+ # Create a server group.
+ instance_group = group_obj.InstanceGroup(
+ self.ctx, policy="anti-affinity")
+ instance_group.name = "foo"
+ instance_group.project_id = self.ctx.project_id
+ instance_group.user_id = self.ctx.user_id
+ instance_group.uuid = uuids.instance_group
+ instance_group.create()
+
+ # Quota for server group members is 1.
+ server = self._build_server()
+ hints = {'group': uuids.instance_group}
+ req = {'server': server, 'os:scheduler_hints': hints}
+ self.admin_api.post_server(req)
+
+        # Attempting to create another server in the group should fail
+        # because we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self.admin_api.post_server, req)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('server_group_members', e.response.text)
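
For context, the LimitFixture used in setUp() stands in for Keystone-backed registered limits and project overrides, and the UnifiedLimitsDriver ultimately funnels checks through oslo.limit. A minimal sketch of that enforcement flow (the usage callback here just reports zero; Nova's real callback counts instances and placement allocations, and a real deployment also needs [oslo_limit] auth settings that the fixture stubs away):

    from oslo_limit import limit

    def usage_callback(project_id, resource_names):
        # Illustrative: report zero current usage for every resource.
        return {name: 0 for name in resource_names}

    enforcer = limit.Enforcer(usage_callback)
    # Raises ProjectOverLimit (surfaced as HTTP 403 in the tests above)
    # when current usage plus the delta exceeds the applicable limit.
    enforcer.enforce('fake', {'servers': 1, 'class:VCPU': 4})
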
diff --git a/nova/tests/unit/accelerator/test_cyborg.py b/nova/tests/unit/accelerator/test_cyborg.py
index c8f3944514..2d814c74a1 100644
--- a/nova/tests/unit/accelerator/test_cyborg.py
+++ b/nova/tests/unit/accelerator/test_cyborg.py
@@ -13,7 +13,7 @@
# under the License.
import itertools
-import mock
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
from requests.models import Response
@@ -190,7 +190,7 @@ class CyborgTestCase(test.NoDBTestCase):
},
"attach_handle_type": "PCI",
"state": "Bound",
- # Devic eprofile name is common to all bound ARQs
+ # Device profile name is common to all bound ARQs
"device_profile_name": arqs[0]["device_profile_name"],
**common
}
@@ -367,7 +367,7 @@ class CyborgTestCase(test.NoDBTestCase):
# If only some ARQs are resolved, return just the resolved ones
unbound_arqs, _ = self._get_arqs_and_request_groups()
_, bound_arqs = self._get_bound_arqs()
- # Create a amixture of unbound and bound ARQs
+ # Create a mixture of unbound and bound ARQs
arqs = [unbound_arqs[0], bound_arqs[0]]
instance_uuid = bound_arqs[0]['instance_uuid']
@@ -487,7 +487,7 @@ class CyborgTestCase(test.NoDBTestCase):
self.assertEqual(bound_arqs, ret_arqs)
def test_get_arq_pci_device_profile(self):
- """Test extractin arq pci device info"""
+ """Test extracting arq pci device info"""
arq = {'uuid': uuids.arq_uuid,
'device_profile_name': "smart_nic",
'device_profile_group_id': '5',
diff --git a/nova/tests/unit/api/openstack/compute/admin_only_action_common.py b/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
index 37fd1012b7..f332d9f32f 100644
--- a/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
+++ b/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_admin_password.py b/nova/tests/unit/api/openstack/compute/test_admin_password.py
index 90a4a2983b..67e4c743d5 100644
--- a/nova/tests/unit/api/openstack/compute/test_admin_password.py
+++ b/nova/tests/unit/api/openstack/compute/test_admin_password.py
@@ -13,7 +13,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+
+from unittest import mock
+
import webob
from nova.api.openstack.compute import admin_password as admin_password_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_aggregates.py b/nova/tests/unit/api/openstack/compute/test_aggregates.py
index fb096861eb..21d644f0be 100644
--- a/nova/tests/unit/api/openstack/compute/test_aggregates.py
+++ b/nova/tests/unit/api/openstack/compute/test_aggregates.py
@@ -15,7 +15,8 @@
"""Tests for the aggregates admin api."""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_api.py b/nova/tests/unit/api/openstack/compute/test_api.py
index ca54be9a74..d1bb6babb7 100644
--- a/nova/tests/unit/api/openstack/compute/test_api.py
+++ b/nova/tests/unit/api/openstack/compute/test_api.py
@@ -143,7 +143,7 @@ class APITest(test.NoDBTestCase):
self.assertEqual(resp.headers[key], str(value))
def test_quota_error_mapping(self):
- self._do_test_exception_mapping(exception.QuotaError, 'too many used')
+ self._do_test_exception_mapping(exception.OverQuota, 'too many used')
def test_non_nova_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
diff --git a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
index 526cb6011d..e4719ea052 100644
--- a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
+++ b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_availability_zone.py b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
index f355eb436a..a408e0d1aa 100644
--- a/nova/tests/unit/api/openstack/compute/test_availability_zone.py
+++ b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
index 55a8b03216..c8ad907b10 100644
--- a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
+++ b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
@@ -13,13 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from ironicclient import exc as ironic_exc
-import mock
from webob import exc
-from nova.api.openstack.compute import baremetal_nodes \
- as b_nodes_v21
+from nova.api.openstack.compute import baremetal_nodes as b_nodes_v21
from nova import context
from nova import exception
from nova import test
diff --git a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
index 429096d51d..a1f3d1e63d 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import webob
from nova.api.openstack import api_version_request
diff --git a/nova/tests/unit/api/openstack/compute/test_console_output.py b/nova/tests/unit/api/openstack/compute/test_console_output.py
index 1a76a445fc..a9dc830255 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_output.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_output.py
@@ -14,8 +14,8 @@
# under the License.
import string
+from unittest import mock
-import mock
import webob
from nova.api.openstack.compute import console_output \
diff --git a/nova/tests/unit/api/openstack/compute/test_create_backup.py b/nova/tests/unit/api/openstack/compute/test_create_backup.py
index f7280a5a37..9728002e88 100644
--- a/nova/tests/unit/api/openstack/compute/test_create_backup.py
+++ b/nova/tests/unit/api/openstack/compute/test_create_backup.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
import webob
@@ -40,10 +41,6 @@ class CreateBackupTestsV21(admin_only_action_common.CommonMixin,
self.controller = getattr(self.create_backup, self.controller_name)()
self.compute_api = self.controller.compute_api
- patch_get = mock.patch.object(self.compute_api, 'get')
- self.mock_get = patch_get.start()
- self.addCleanup(patch_get.stop)
-
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_with_metadata(self, mock_backup, mock_check_image):
diff --git a/nova/tests/unit/api/openstack/compute/test_deferred_delete.py b/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
index db6f774c51..8a1c8efd57 100644
--- a/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
+++ b/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import deferred_delete as dd_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_disk_config.py b/nova/tests/unit/api/openstack/compute/test_disk_config.py
index bf3be1d0a3..c5ee59722a 100644
--- a/nova/tests/unit/api/openstack/compute/test_disk_config.py
+++ b/nova/tests/unit/api/openstack/compute/test_disk_config.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from nova.api.openstack import compute
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index 6620d7a180..bd88bb8d6e 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
import webob
@@ -415,3 +416,32 @@ class EvacuateTestV268(EvacuateTestV229):
def test_forced_evacuate_with_no_host_provided(self):
# not applicable for v2.68, which removed the 'force' parameter
pass
+
+
+class EvacuateTestV295(EvacuateTestV268):
+ def setUp(self):
+        super(EvacuateTestV295, self).setUp()
+ self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
+ version='2.95')
+ self.req = fakes.HTTPRequest.blank('', version='2.95')
+ self.mock_get_min_ver = self.useFixture(fixtures.MockPatch(
+ 'nova.objects.service.get_minimum_version_all_cells',
+ return_value=62)).mock
+
+ def test_evacuate_version_error(self):
+ self.mock_get_min_ver.return_value = 61
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._get_evacuate_response,
+ {'host': 'my-host', 'adminPass': 'foo'})
+
+ def test_evacuate_unsupported_rpc(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.UnsupportedRPCVersion(
+ api="fakeapi",
+ required="x.xx")
+
+ self.stub_out('nova.compute.api.API.evacuate', fake_evacuate)
+ self._check_evacuate_failure(webob.exc.HTTPConflict,
+ {'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
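
Both new tests hinge on the minimum compute service version mocked in setUp(): 62 is accepted, 61 is rejected with HTTP 400. A sketch of that guard (the constant name is hypothetical):

    import webob.exc

    from nova import objects

    MIN_COMPUTE_VER_FOR_V295 = 62  # hypothetical constant name

    def check_min_version_for_v295_evacuate(ctxt):
        minver = objects.service.get_minimum_version_all_cells(
            ctxt, ['nova-compute'])
        if minver < MIN_COMPUTE_VER_FOR_V295:
            raise webob.exc.HTTPBadRequest(
                explanation='Not all compute services are new enough '
                            'for evacuate at microversion 2.95')
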
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 8c25a2efc2..ea9ca2f632 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from webob import exc
from nova.api.openstack import api_version_request as api_version
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
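
A side note on the argument order in the first new test above: stacked mock.patch decorators apply bottom-up, so the patch closest to the function arrives first in the argument list (mock_verify before mock_remove_access). A self-contained illustration:

    from unittest import mock

    @mock.patch('os.path.exists')    # outermost -> second argument
    @mock.patch('os.path.getsize')   # innermost -> first argument
    def check(mock_getsize, mock_exists):
        assert mock_getsize is not mock_exists

    check()
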
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
index f8412c772c..948f255f34 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors.py b/nova/tests/unit/api/openstack/compute/test_flavors.py
index 4390b32012..c7fbf5c468 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors.py
@@ -13,9 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from urllib import parse as urlparse
-import mock
import webob
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
index e68bf7e306..8355ce59b5 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import testtools
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py b/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
index e25302ee9a..71ca209672 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import floating_ip_pools \
as fipp_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ips.py b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
index 2cb89dfe76..7093c5a80d 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ips.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_hosts.py b/nova/tests/unit/api/openstack/compute/test_hosts.py
index 7adc698093..f1cdde2917 100644
--- a/nova/tests/unit/api/openstack/compute/test_hosts.py
+++ b/nova/tests/unit/api/openstack/compute/test_hosts.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
import testtools
import webob.exc
diff --git a/nova/tests/unit/api/openstack/compute/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
index facc5389be..a908988811 100644
--- a/nova/tests/unit/api/openstack/compute/test_hypervisors.py
+++ b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -368,25 +368,23 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
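
This refactor (repeated throughout the rest of the file) assumes the controller's host_api is already replaced by a fixture-provided mock, so tests simply reprogram it in place. Clearing side_effect before setting return_value matters because a configured side_effect always takes precedence over return_value, as a small standalone example shows:

    from unittest import mock

    m = mock.Mock(side_effect=RuntimeError('fixture default'))

    # Without this, the RuntimeError side_effect would still fire.
    m.side_effect = None
    m.return_value = ['node1']
    assert m() == ['node1']
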
def test_index_compute_host_not_mapped(self):
"""Tests that we don't fail index if a host is not mapped."""
@@ -402,25 +400,22 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ self.controller.host_api.service_get_by_compute_host = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_detail(self):
req = self._get_request(True)
@@ -444,32 +439,30 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_detail_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
@@ -487,32 +480,28 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_show(self):
req = self._get_request(True)
@@ -525,21 +514,16 @@ class HypervisorsTestV21(test.NoDBTestCase):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
-
- @mock.patch.object(self.controller.host_api, 'compute_node_get',
- return_value=self.TEST_HYPERS_OBJ[0])
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(self, mock_service, mock_compute_node_get):
- req = self._get_request(True)
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound, self.controller.show,
- req, hyper_id)
- self.assertTrue(mock_service.called)
- mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.show, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
+ self.controller.host_api.compute_node_get.assert_called_once_with(
+ mock.ANY, hyper_id)
def test_show_noid(self):
req = self._get_request(True)
@@ -611,20 +595,15 @@ class HypervisorsTestV21(test.NoDBTestCase):
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_uptime_hypervisor_not_mapped_service_get(self):
- @mock.patch.object(self.controller.host_api, 'compute_node_get')
- @mock.patch.object(self.controller.host_api, 'get_host_uptime')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- side_effect=exception.HostMappingNotFound(
- name='dummy'))
- def _test(mock_get, _, __):
- req = self._get_request(True)
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound,
- self.controller.uptime, req, hyper_id)
- self.assertTrue(mock_get.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='dummy'))
- _test()
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.uptime, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
def test_uptime_hypervisor_not_mapped(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
@@ -644,30 +623,26 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_search_non_exist(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertEqual(1, m_search.call_count)
def test_search_unmapped(self):
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = [mock.MagicMock()]
- @mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(mock_service, mock_search):
- mock_search.return_value = [mock.MagicMock()]
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertTrue(mock_service.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
- _test()
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
@@ -702,15 +677,12 @@ class HypervisorsTestV21(test.NoDBTestCase):
def test_servers_compute_host_not_found(self):
req = self._get_request(True)
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+ with mock.patch.object(
+ self.controller.host_api,
+ 'instance_get_all_by_host',
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -718,24 +690,25 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual({'hypervisors': []}, result)
def test_servers_non_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers,
- req, '115')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.servers,
+ req, '115')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_non_integer_hypervisor_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers, req, 'abc')
- self.assertEqual(1, mock_node_search.call_count)
+ req = self._get_request(True)
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.servers, req, 'abc')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_no_servers(self):
with mock.patch.object(self.controller.host_api,
@@ -1089,15 +1062,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=1')
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+
+ with mock.patch.object(
+ self.controller.host_api,
+ "instance_get_all_by_host",
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -1157,11 +1128,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=yes&'
'hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList()) as s:
- self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList()
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
def test_detail_with_hostname_pattern(self):
"""Test listing hypervisors with details and using the
@@ -1170,13 +1143,14 @@ class HypervisorsTestV253(HypervisorsTestV252):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(
- self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList(objects=[TEST_HYPERS_OBJ[0]])
- ) as s:
- result = self.controller.detail(req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList(
+ objects=[TEST_HYPERS_OBJ[0]])
+
+ result = self.controller.detail(req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
expected = {'hypervisors': [self.DETAIL_HYPERS_DICTS[0]]}
@@ -1483,15 +1457,11 @@ class HypervisorsTestV288(HypervisorsTestV275):
self.controller.uptime, req)
def test_uptime_old_version(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- return_value='fake uptime',
- ):
- req = self._get_request(use_admin_context=True, version='2.87')
- hyper_id = self._get_hyper_id()
+ req = self._get_request(use_admin_context=True, version='2.87')
+ hyper_id = self._get_hyper_id()
- # no exception == pass
- self.controller.uptime(req, hyper_id)
+ # no exception == pass
+ self.controller.uptime(req, hyper_id)
def test_uptime_noid(self):
# the separate 'uptime' API has been removed, so skip this test
@@ -1526,34 +1496,36 @@ class HypervisorsTestV288(HypervisorsTestV275):
pass
def test_show_with_uptime_notimplemented(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=NotImplementedError,
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ NotImplementedError())
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1, self.controller.host_api.get_host_uptime.call_count)
def test_show_with_uptime_hypervisor_down(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=exception.ComputeServiceUnavailable(host='dummy')
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ exception.ComputeServiceUnavailable(host='dummy'))
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1,
+ self.controller.host_api.get_host_uptime.call_count
+ )
def test_show_old_version(self):
# ensure things still work as expected here
diff --git a/nova/tests/unit/api/openstack/compute/test_image_metadata.py b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
index 2e1c26a712..4072d6f489 100644
--- a/nova/tests/unit/api/openstack/compute/test_image_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_images.py b/nova/tests/unit/api/openstack/compute/test_images.py
index fad4fcb5a2..734e755dd5 100644
--- a/nova/tests/unit/api/openstack/compute/test_images.py
+++ b/nova/tests/unit/api/openstack/compute/test_images.py
@@ -19,9 +19,9 @@ and as a WSGI layer
"""
import copy
+from unittest import mock
from urllib import parse as urlparse
-import mock
import webob
from nova.api.openstack.compute import images as images_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_instance_actions.py b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
index 04e9ae443e..df13e1d89d 100644
--- a/nova/tests/unit/api/openstack/compute/test_instance_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
@@ -15,9 +15,9 @@
import copy
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_policy import policy as oslo_policy
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_keypairs.py b/nova/tests/unit/api/openstack/compute/test_keypairs.py
index 657973ffbd..590639d5ed 100644
--- a/nova/tests/unit/api/openstack/compute/test_keypairs.py
+++ b/nova/tests/unit/api/openstack/compute/test_keypairs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
@@ -37,6 +38,8 @@ keypair_data = {
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
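+# NOTE: a name containing '@', '.' and a space is only accepted from API
+# microversion 2.92 onwards; older microversions reject it.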
+keypair_name_2_92_compatible = 'my-key@ my.host'
+
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
@@ -110,16 +113,22 @@ class KeypairsTestV21(test.TestCase):
self.assertGreater(len(res_dict['keypair']['private_key']), 0)
self._assert_keypair_type(res_dict)
- def _test_keypair_create_bad_request_case(self,
- body,
- exception):
- self.assertRaises(exception,
- self.controller.create, self.req, body=body)
+ def _test_keypair_create_bad_request_case(
+ self, body, exception, error_msg=None
+ ):
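+ # If an error_msg is given, also check the exception message, not
+ # just the exception type.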
+ if error_msg:
+ self.assertRaisesRegex(exception, error_msg,
+ self.controller.create,
+ self.req, body=body)
+ else:
+ self.assertRaises(exception,
+ self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ 'is too short')
def test_keypair_create_with_name_too_long(self):
body = {
@@ -128,7 +137,8 @@ class KeypairsTestV21(test.TestCase):
}
}
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ 'is too long')
def test_keypair_create_with_name_leading_trailing_spaces(self):
body = {
@@ -136,8 +146,10 @@ class KeypairsTestV21(test.TestCase):
'name': ' test '
}
}
+ expected_msg = 'Can not start or end with whitespace.'
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ expected_msg)
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
@@ -152,8 +164,21 @@ class KeypairsTestV21(test.TestCase):
'name': 'test/keypair'
}
}
+ expected_msg = 'Only expected characters'
self._test_keypair_create_bad_request_case(body,
- webob.exc.HTTPBadRequest)
+ self.validation_error,
+ expected_msg)
+
+ def test_keypair_create_with_special_characters(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible
+ }
+ }
+ expected_msg = 'Only expected characters'
+ self._test_keypair_create_bad_request_case(body,
+ self.validation_error,
+ expected_msg)
def test_keypair_import_bad_key(self):
body = {
@@ -167,8 +192,10 @@ class KeypairsTestV21(test.TestCase):
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
+ expected_msg = "'keypair' is a required property"
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ expected_msg)
def test_keypair_import(self):
body = {
@@ -228,50 +255,6 @@ class KeypairsTestV21(test.TestCase):
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
- @mock.patch('nova.objects.Quotas.check_deltas')
- def test_keypair_create_over_quota_during_recheck(self, mock_check):
- # Simulate a race where the first check passes and the recheck fails.
- # First check occurs in compute/api.
- exc = exception.OverQuota(overs='key_pairs', usages={'key_pairs': 100})
- mock_check.side_effect = [None, exc]
- body = {
- 'keypair': {
- 'name': 'FAKE',
- },
- }
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, self.req, body=body)
-
- ctxt = self.req.environ['nova.context']
- self.assertEqual(2, mock_check.call_count)
- call1 = mock.call(ctxt, {'key_pairs': 1}, ctxt.user_id)
- call2 = mock.call(ctxt, {'key_pairs': 0}, ctxt.user_id)
- mock_check.assert_has_calls([call1, call2])
-
- # Verify we removed the key pair that was added after the first
- # quota check passed.
- key_pairs = objects.KeyPairList.get_by_user(ctxt, ctxt.user_id)
- names = [key_pair.name for key_pair in key_pairs]
- self.assertNotIn('create_test', names)
-
- @mock.patch('nova.objects.Quotas.check_deltas')
- def test_keypair_create_no_quota_recheck(self, mock_check):
- # Disable recheck_quota.
- self.flags(recheck_quota=False, group='quota')
-
- body = {
- 'keypair': {
- 'name': 'create_test',
- },
- }
- self.controller.create(self.req, body=body)
-
- ctxt = self.req.environ['nova.context']
- # check_deltas should have been called only once.
- mock_check.assert_called_once_with(ctxt, {'key_pairs': 1},
- ctxt.user_id)
-
def test_keypair_create_duplicate(self):
self.stub_out("nova.objects.KeyPair.create",
db_key_pair_create_duplicate)
@@ -514,3 +497,82 @@ class KeypairsTestV275(test.TestCase):
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1)
+
+
+class KeypairsTestV292(test.TestCase):
+ wsgi_api_version = '2.92'
+ wsgi_old_api_version = '2.91'
+
+ def setUp(self):
+ super(KeypairsTestV292, self).setUp()
+ self.controller = keypairs_v21.KeypairController()
+ self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ self.old_req = fakes.HTTPRequest.blank(
+ '', version=self.wsgi_old_api_version)
+
+ def test_keypair_create_no_longer_supported(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ }
+ }
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=body)
+
+ def test_keypair_create_works_with_old_version(self):
+ body = {
+ 'keypair': {
+ 'name': 'fake',
+ }
+ }
+ res_dict = self.controller.create(self.old_req, body=body)
+ self.assertEqual('fake', res_dict['keypair']['name'])
+ self.assertGreater(len(res_dict['keypair']['private_key']), 0)
+
+ def test_keypair_import_works_with_new_version(self):
+ body = {
+ 'keypair': {
+ 'name': 'fake',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ }
+ }
+ res_dict = self.controller.create(self.req, body=body)
+ self.assertEqual('fake', res_dict['keypair']['name'])
+ self.assertNotIn('private_key', res_dict['keypair'])
+
+ def test_keypair_create_refuses_special_chars_with_old_version(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ }
+ }
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.old_req, body=body)
+
+ def test_keypair_import_with_special_characters(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ }
+ }
+
+ res_dict = self.controller.create(self.req, body=body)
+ self.assertEqual(keypair_name_2_92_compatible,
+ res_dict['keypair']['name'])
diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py
index 31033e111d..1748023aa8 100644
--- a/nova/tests/unit/api/openstack/compute/test_limits.py
+++ b/nova/tests/unit/api/openstack/compute/test_limits.py
@@ -19,8 +19,9 @@ Tests dealing with HTTP rate-limiting.
from http import client as httplib
from io import StringIO
+from unittest import mock
-import mock
+from oslo_limit import fixture as limit_fixture
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
@@ -29,8 +30,10 @@ from nova.api.openstack.compute import views
from nova.api.openstack import wsgi
import nova.context
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
+from nova import objects
from nova.policies import limits as l_policies
-from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
@@ -48,12 +51,12 @@ class BaseLimitTestSuite(test.NoDBTestCase):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- mock_get_project_quotas = mock.patch.object(
+ patcher_get_project_quotas = mock.patch.object(
nova.quota.QUOTAS,
"get_project_quotas",
- side_effect = stub_get_project_quotas)
- mock_get_project_quotas.start()
- self.addCleanup(mock_get_project_quotas.stop)
+ side_effect=stub_get_project_quotas)
+ self.mock_get_project_quotas = patcher_get_project_quotas.start()
+ self.addCleanup(patcher_get_project_quotas.stop)
patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
self.mock_can = patcher.start()
self.addCleanup(patcher.stop)
@@ -150,16 +153,14 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
- response = request.get_response(self.controller)
+ response = request.get_response(self.controller)
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
- get_project_quotas.assert_called_once_with(context, tenant_id,
- usages=True)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, tenant_id, usages=True)
def _do_test_used_limits(self, reserved):
request = self._get_index_request(tenant_id=None)
@@ -182,8 +183,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return limits
- self.stub_out('nova.quota.QUOTAS.get_project_quotas',
- stub_get_project_quotas)
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
res = request.get_response(self.controller)
body = jsonutils.loads(res.body)
@@ -207,15 +207,15 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
user_id=user_id,
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- self.assertEqual(2, self.mock_can.call_count)
- self.mock_can.assert_called_with(
- l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME,
- target={"project_id": tenant_id})
- mock_get_quotas.assert_called_once_with(context,
- tenant_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.assertEqual(2, self.mock_can.call_count)
+ self.mock_can.assert_called_with(
+ l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
+ self.mock_get_project_quotas.assert_called_once_with(context,
+ tenant_id, usages=True)
def _test_admin_can_fetch_used_limits_for_own_project(self, req_get):
project_id = "123456"
@@ -227,11 +227,12 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_admin_can_fetch_used_limits_for_own_project(self):
req_get = {}
@@ -251,7 +252,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
req_get = {'tenant_id': -1}
self._test_admin_can_fetch_used_limits_for_own_project(req_get)
- def test_admin_can_fetch_used_limits_with_unkown_param(self):
+ def test_admin_can_fetch_used_limits_with_unknown_param(self):
req_get = {'tenant_id': '123', 'unknown': 'unknown'}
self._test_admin_can_fetch_used_limits_for_own_project(req_get)
@@ -259,12 +260,13 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id = "123456"
fake_req = self._get_index_request(project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ fake_req.get_response(self.controller)
+
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_used_ram_added(self):
fake_req = self._get_index_request()
@@ -272,28 +274,26 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return {'ram': {'limit': 512, 'in_use': 256}}
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- side_effect=stub_get_project_quotas
- ) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertIn('totalRAMUsed', abs_limits)
- self.assertEqual(256, abs_limits['totalRAMUsed'])
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertIn('totalRAMUsed', abs_limits)
+ self.assertEqual(256, abs_limits['totalRAMUsed'])
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
def test_no_ram_quota(self):
fake_req = self._get_index_request()
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertNotIn('totalRAMUsed', abs_limits)
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertNotIn('totalRAMUsed', abs_limits)
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
class FakeHttplibSocket(object):
@@ -395,25 +395,24 @@ class LimitsControllerTestV236(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxTotalRAMSize": 512,
- "maxTotalInstances": 5,
- "maxTotalCores": 21,
- "maxTotalKeypairs": 10,
- "totalRAMUsed": 256,
- "totalCoresUsed": 10,
- "totalInstancesUsed": 2,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ "maxTotalKeypairs": 10,
+ "totalRAMUsed": 256,
+ "totalCoresUsed": 10,
+ "totalInstancesUsed": 2,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV239(BaseLimitTestSuite):
@@ -433,21 +432,20 @@ class LimitsControllerTestV239(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- # staring from version 2.39 there is no 'maxImageMeta' field
- # in response after removing 'image-metadata' proxy API
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxServerMeta": 1,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ # starting from version 2.39 there is no 'maxImageMeta' field
+ # in response after removing 'image-metadata' proxy API
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 1,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV275(BaseLimitTestSuite):
@@ -459,21 +457,170 @@ class LimitsControllerTestV275(BaseLimitTestSuite):
absolute_limits = {
"metadata_items": 1,
}
- req = fakes.HTTPRequest.blank("/?unkown=fake",
+ req = fakes.HTTPRequest.blank("/?unknown=fake",
version='2.74')
def _get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- self.controller.index(req)
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+ self.controller.index(req)
def test_index_additional_query_param(self):
- req = fakes.HTTPRequest.blank("/?unkown=fake",
+ req = fakes.HTTPRequest.blank("/?unknown=fake",
version='2.75')
self.assertRaises(
exception.ValidationError,
self.controller.index, req=req)
+
+
+class NoopLimitsControllerTest(test.NoDBTestCase):
+ quota_driver = "nova.quota.NoopQuotaDriver"
+
+ def setUp(self):
+ super(NoopLimitsControllerTest, self).setUp()
+ self.flags(driver=self.quota_driver, group="quota")
+ self.controller = limits_v21.LimitsController()
+ # remove policy checks
+ patcher = mock.patch('nova.context.RequestContext.can')
+ self.mock_can = patcher.start()
+ self.addCleanup(patcher.stop)
+
+ def test_index_v21(self):
+ req = fakes.HTTPRequest.blank("/")
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxImageMeta': -1,
+ 'maxPersonality': -1,
+ 'maxPersonalitySize': -1,
+ 'maxSecurityGroupRules': -1,
+ 'maxSecurityGroups': -1,
+ 'maxServerGroupMembers': -1,
+ 'maxServerGroups': -1,
+ 'maxServerMeta': -1,
+ 'maxTotalCores': -1,
+ 'maxTotalFloatingIps': -1,
+ 'maxTotalInstances': -1,
+ 'maxTotalKeypairs': -1,
+ 'maxTotalRAMSize': -1,
+ 'totalCoresUsed': -1,
+ 'totalFloatingIpsUsed': -1,
+ 'totalInstancesUsed': -1,
+ 'totalRAMUsed': -1,
+ 'totalSecurityGroupsUsed': -1,
+ 'totalServerGroupsUsed': -1,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_index_v275(self):
+ req = fakes.HTTPRequest.blank("/?tenant_id=faketenant",
+ version='2.75')
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxServerGroupMembers': -1,
+ 'maxServerGroups': -1,
+ 'maxServerMeta': -1,
+ 'maxTotalCores': -1,
+ 'maxTotalInstances': -1,
+ 'maxTotalKeypairs': -1,
+ 'maxTotalRAMSize': -1,
+ 'totalCoresUsed': -1,
+ 'totalInstancesUsed': -1,
+ 'totalRAMUsed': -1,
+ 'totalServerGroupsUsed': -1,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
+
+
+class UnifiedLimitsControllerTest(NoopLimitsControllerTest):
+ quota_driver = "nova.quota.UnifiedLimitsDriver"
+
+ def setUp(self):
+ super(UnifiedLimitsControllerTest, self).setUp()
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
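+ # LimitFixture stubs oslo.limit with the registered limits above and
+ # no project overrides, so no real Keystone service is required.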
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_index_v21(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("/")
+ response = self.controller.index(req)
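+ # The legacy fields below are filled from the unified limits: e.g.
+ # maxPersonality mirrors INJECTED_FILES, while the instances/cores/ram
+ # values come from the mocked placement limits and counts.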
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxImageMeta': 128,
+ 'maxPersonality': 5,
+ 'maxPersonalitySize': 10240,
+ 'maxSecurityGroupRules': -1,
+ 'maxSecurityGroups': -1,
+ 'maxServerGroupMembers': 10,
+ 'maxServerGroups': 12,
+ 'maxServerMeta': 128,
+ 'maxTotalCores': 2,
+ 'maxTotalFloatingIps': -1,
+ 'maxTotalInstances': 1,
+ 'maxTotalKeypairs': 100,
+ 'maxTotalRAMSize': 3,
+ 'totalCoresUsed': 5,
+ 'totalFloatingIpsUsed': 0,
+ 'totalInstancesUsed': 4,
+ 'totalRAMUsed': 6,
+ 'totalSecurityGroupsUsed': 0,
+ 'totalServerGroupsUsed': 9,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_index_v275(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("/?tenant_id=faketenant",
+ version='2.75')
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxServerGroupMembers': 10,
+ 'maxServerGroups': 12,
+ 'maxServerMeta': 128,
+ 'maxTotalCores': 2,
+ 'maxTotalInstances': 1,
+ 'maxTotalKeypairs': 100,
+ 'maxTotalRAMSize': 3,
+ 'totalCoresUsed': 5,
+ 'totalInstancesUsed': 4,
+ 'totalRAMUsed': 6,
+ 'totalServerGroupsUsed': 9,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
diff --git a/nova/tests/unit/api/openstack/compute/test_lock_server.py b/nova/tests/unit/api/openstack/compute/test_lock_server.py
index a605e2bcdb..bf49bf2b73 100644
--- a/nova/tests/unit/api/openstack/compute/test_lock_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_lock_server.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack import api_version_request
from nova.api.openstack import common
@@ -114,7 +114,7 @@ class LockServerTestsV273(LockServerTestsV21):
self.controller._lock, self.req, instance.uuid, body=body)
self.assertIn("256 is not of type 'string'", str(exp))
- def test_lock_with_invalid_paramater(self):
+ def test_lock_with_invalid_parameter(self):
# This will fail from 2.73 since we have a schema check that allows
# only locked_reason
instance = fake_instance.fake_instance_obj(
diff --git a/nova/tests/unit/api/openstack/compute/test_microversions.py b/nova/tests/unit/api/openstack/compute/test_microversions.py
index c5b1ddb5e5..9f5dd90889 100644
--- a/nova/tests/unit/api/openstack/compute/test_microversions.py
+++ b/nova/tests/unit/api/openstack/compute/test_microversions.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index 683759eccc..8d1c853206 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
@@ -530,9 +531,8 @@ class MigrateServerTestsV256(MigrateServerTestsV234):
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_exception(self, exc_info, expected_result):
- @mock.patch.object(self.compute_api, 'get')
@mock.patch.object(self.compute_api, 'resize', side_effect=exc_info)
- def _test(mock_resize, mock_get):
+ def _test(mock_resize):
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(expected_result,
self.controller._migrate,
diff --git a/nova/tests/unit/api/openstack/compute/test_migrations.py b/nova/tests/unit/api/openstack/compute/test_migrations.py
index a06d395bea..19bc42a9de 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrations.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_multinic.py b/nova/tests/unit/api/openstack/compute/test_multinic.py
index ceaaebf373..17a872fed2 100644
--- a/nova/tests/unit/api/openstack/compute/test_multinic.py
+++ b/nova/tests/unit/api/openstack/compute/test_multinic.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import webob
from nova.api.openstack.compute import multinic as multinic_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_networks.py b/nova/tests/unit/api/openstack/compute/test_networks.py
index 595353e7b1..bcbce58483 100644
--- a/nova/tests/unit/api/openstack/compute/test_networks.py
+++ b/nova/tests/unit/api/openstack/compute/test_networks.py
@@ -26,7 +26,7 @@ from nova import test
from nova.tests.unit.api.openstack import fakes
-# NOTE(stephenfin): obviously these aren't complete reponses, but this is all
+# NOTE(stephenfin): obviously these aren't complete responses, but this is all
# we care about
FAKE_NETWORKS = [
{
diff --git a/nova/tests/unit/api/openstack/compute/test_quota_classes.py b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
index bdb33a7e1a..463f8344c0 100644
--- a/nova/tests/unit/api/openstack/compute/test_quota_classes.py
+++ b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
@@ -12,12 +12,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import copy
+from unittest import mock
+
+from oslo_limit import fixture as limit_fixture
import webob
from nova.api.openstack.compute import quota_classes \
as quota_classes_v21
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
+from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -156,3 +163,220 @@ class QuotaClassSetsTestV257(QuotaClassSetsTestV250):
for resource in quota_classes_v21.FILTERED_QUOTAS_2_57:
self.quota_resources.pop(resource, None)
self.filtered_quotas.extend(quota_classes_v21.FILTERED_QUOTAS_2_57)
+
+
+class NoopQuotaClassesTest(test.NoDBTestCase):
+ quota_driver = "nova.quota.NoopQuotaDriver"
+
+ def setUp(self):
+ super(NoopQuotaClassesTest, self).setUp()
+ self.flags(driver=self.quota_driver, group="quota")
+ self.controller = quota_classes_v21.QuotaClassSetsController()
+
+ def test_show_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, "test_class")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'test_class',
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_show_v257(self):
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, "default")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'default',
+ 'cores': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_update_v21_still_rejects_badrequests(self):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'unsupported': 12}}
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, 'test_class', body=body)
+
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v21(self, mock_update):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_update.assert_called_once_with(req.environ['nova.context'],
+ "default", "ram", 51200)
+
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v257(self, mock_update):
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_update.assert_called_once_with(req.environ['nova.context'],
+ "default", "ram", 51200)
+
+
+class UnifiedLimitsQuotaClassesTest(NoopQuotaClassesTest):
+ quota_driver = "nova.quota.UnifiedLimitsDriver"
+
+ def setUp(self):
+ super(UnifiedLimitsQuotaClassesTest, self).setUp()
+ # Use a different value for each limit so mix-ups between the config
+ # options would show up, while otherwise testing as much as possible
+ # with the default config.
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group='quota')
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_show_v21(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, "test_class")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'test_class',
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'ram': 3,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_show_v257(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, "default")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'default',
+ 'cores': 2,
+ 'instances': 1,
+ 'ram': 3,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_update_still_rejects_badrequests(self):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'unsupported': 12}}
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, 'test_class', body=body)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v21(self, mock_update, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1
+ }
+ }
+ self.assertEqual(expected_response, response)
+ # TODO(johngarbutt) we should be proxying to keystone
+ self.assertEqual(0, mock_update.call_count)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v257(self, mock_update, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': 2,
+ 'instances': 1,
+ 'ram': 3,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ # TODO(johngarbutt) we should be proxying to keystone
+ self.assertEqual(0, mock_update.call_count)
diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py
index 545bd51e13..0a1bbd08d8 100644
--- a/nova/tests/unit/api/openstack/compute/test_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_quotas.py
@@ -14,12 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+from oslo_limit import fixture as limit_fixture
+from oslo_utils.fixture import uuidsentinel as uuids
import webob
from nova.api.openstack.compute import quota_sets as quotas_v21
from nova.db import constants as db_const
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
+from nova import objects
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -660,3 +666,475 @@ class QuotaSetsTestV275(QuotaSetsTestV257):
query_string=query_string)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1234)
+
+
+class NoopQuotaSetsTest(test.NoDBTestCase):
+ quota_driver = "nova.quota.NoopQuotaDriver"
+ expected_detail = {'in_use': -1, 'limit': -1, 'reserved': -1}
+
+ def setUp(self):
+ super(NoopQuotaSetsTest, self).setUp()
+ self.flags(driver=self.quota_driver, group="quota")
+ self.controller = quotas_v21.QuotaSetsController()
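+ # Stub out project id validation so no identity service is needed.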
+ self.stub_out('nova.api.openstack.identity.verify_project_id',
+ lambda ctx, project_id: True)
+
+ def test_show_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_show_v257(self):
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1}}
+ self.assertEqual(expected_response, response)
+
+ def test_detail_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.detail(req, uuids.project_id)
+
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': self.expected_detail,
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': self.expected_detail,
+ 'injected_file_path_bytes': self.expected_detail,
+ 'injected_files': self.expected_detail,
+ 'instances': self.expected_detail,
+ 'key_pairs': self.expected_detail,
+ 'metadata_items': self.expected_detail,
+ 'ram': self.expected_detail,
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': self.expected_detail,
+ 'server_groups': self.expected_detail,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_detail_v21_user(self):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ response = self.controller.detail(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': self.expected_detail,
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': self.expected_detail,
+ 'injected_file_path_bytes': self.expected_detail,
+ 'injected_files': self.expected_detail,
+ 'instances': self.expected_detail,
+ 'key_pairs': self.expected_detail,
+ 'metadata_items': self.expected_detail,
+ 'ram': self.expected_detail,
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': self.expected_detail,
+ 'server_groups': self.expected_detail,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_update_still_rejects_badrequests(self):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'unsupported': 12}}
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, uuids.project_id, body=body)
+
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21(self, mock_create):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_set': {'server_groups': 2}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_create.assert_called_once_with(req.environ['nova.context'],
+ uuids.project_id, "server_groups",
+ 2, user_id=None)
+
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21_user(self, mock_create):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ body = {'quota_set': {'key_pairs': 52}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_create.assert_called_once_with(req.environ['nova.context'],
+ uuids.project_id, "key_pairs", 52,
+ user_id="42")
+
+ def test_defaults_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.defaults(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project')
+ def test_quotas_delete(self, mock_destroy_all_by_project):
+ req = fakes.HTTPRequest.blank("")
+ self.controller.delete(req, "1234")
+ mock_destroy_all_by_project.assert_called_once_with(
+ req.environ['nova.context'], "1234")
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project_and_user')
+ def test_user_quotas_delete(self, mock_destroy_all_by_user):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ self.controller.delete(req, "1234")
+ mock_destroy_all_by_user.assert_called_once_with(
+ req.environ['nova.context'], "1234", "42")
+
+
+class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
+ quota_driver = "nova.quota.UnifiedLimitsDriver"
+ # this matches what the db driver returns
+ expected_detail = {'in_use': 0, 'limit': -1, 'reserved': 0}
+
+ def setUp(self):
+ super(UnifiedLimitsQuotaSetsTest, self).setUp()
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
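+ # Keep a reference to the fixture so individual tests can swap in
+ # different registered limits.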
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture(reglimits, {}))
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ def test_show_v21(self, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ def test_show_v257(self, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 2,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'server_group_members': 10,
+ 'server_groups': 12}}
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_detail_v21(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.detail(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': {
+ 'in_use': 5, 'limit': 2, 'reserved': 0},
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': {
+ 'in_use': 0, 'limit': 10240, 'reserved': 0},
+ 'injected_file_path_bytes': {
+ 'in_use': 0, 'limit': 255, 'reserved': 0},
+ 'injected_files': {
+ 'in_use': 0, 'limit': 5, 'reserved': 0},
+ 'instances': {
+ 'in_use': 4, 'limit': 1, 'reserved': 0},
+ 'key_pairs': {
+ 'in_use': 0, 'limit': 100, 'reserved': 0},
+ 'metadata_items': {
+ 'in_use': 0, 'limit': 128, 'reserved': 0},
+ 'ram': {
+ 'in_use': 6, 'limit': 3, 'reserved': 0},
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': {
+ 'in_use': 0, 'limit': 10, 'reserved': 0},
+ 'server_groups': {
+ 'in_use': 9, 'limit': 12, 'reserved': 0},
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_detail_v21_user(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ response = self.controller.detail(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': {
+ 'in_use': 5, 'limit': 2, 'reserved': 0},
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': {
+ 'in_use': 0, 'limit': 10240, 'reserved': 0},
+ 'injected_file_path_bytes': {
+ 'in_use': 0, 'limit': 255, 'reserved': 0},
+ 'injected_files': {
+ 'in_use': 0, 'limit': 5, 'reserved': 0},
+ 'instances': {
+ 'in_use': 4, 'limit': 1, 'reserved': 0},
+ 'key_pairs': {
+ 'in_use': 0, 'limit': 100, 'reserved': 0},
+ 'metadata_items': {
+ 'in_use': 0, 'limit': 128, 'reserved': 0},
+ 'ram': {
+ 'in_use': 6, 'limit': 3, 'reserved': 0},
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': {
+ 'in_use': 0, 'limit': 10, 'reserved': 0},
+ 'server_groups': {
+ 'in_use': 9, 'limit': 12, 'reserved': 0},
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21(self, mock_create, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ # TODO(johngarbutt) still need to implement get_settable_quotas
+ body = {'quota_set': {'server_groups': 2}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ self.assertEqual(0, mock_create.call_count)
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21_user(self, mock_create, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ body = {'quota_set': {'key_pairs': 52}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ self.assertEqual(0, mock_create.call_count)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_defaults_v21(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.defaults(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_defaults_v21_different_limit_values(self):
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 7,
+ local_limit.INJECTED_FILES: 6,
+ local_limit.INJECTED_FILES_CONTENT: 4,
+ local_limit.INJECTED_FILES_PATH: 5,
+ local_limit.KEY_PAIRS: 1,
+ local_limit.SERVER_GROUPS: 3,
+ local_limit.SERVER_GROUP_MEMBERS: 2}
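+ # The fixture reads limits from its reglimits attribute, so replacing
+ # it changes the effective registered limits for this test.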
+ self.limit_fixture.reglimits = reglimits
+
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.defaults(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 0,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 4,
+ 'injected_file_path_bytes': 5,
+ 'injected_files': 6,
+ 'instances': 0,
+ 'key_pairs': 1,
+ 'metadata_items': 7,
+ 'ram': 0,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 2,
+ 'server_groups': 3,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project')
+ def test_quotas_delete(self, mock_destroy_all_by_project):
+ req = fakes.HTTPRequest.blank("")
+ self.controller.delete(req, "1234")
+ # Ensure destroy isn't called for unified limits
+ self.assertEqual(0, mock_destroy_all_by_project.call_count)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project_and_user')
+ def test_user_quotas_delete(self, mock_destroy_all_by_user):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ self.controller.delete(req, "1234")
+ # Ensure destroy isn't called for unified limits
+ self.assertEqual(0, mock_destroy_all_by_user.call_count)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index 6427b1abf0..961f4a02c9 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack import api_version_request
@@ -103,6 +104,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
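+ # An InstanceInvalidState error from the compute API should be
+ # translated into HTTP 409 Conflict.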
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
@@ -446,7 +459,7 @@ class ConsolesExtensionTestV26(test.NoDBTestCase):
self.req, fakes.FAKE_UUID, body=body)
self.assertTrue(mock_handler.called)
- def test_create_console_not_found(self,):
+ def test_create_console_not_found(self):
mock_handler = mock.MagicMock()
mock_handler.side_effect = exception.InstanceNotFound(
instance_id='xxx')
diff --git a/nova/tests/unit/api/openstack/compute/test_rescue.py b/nova/tests/unit/api/openstack/compute/test_rescue.py
index 28b8217d1a..8a87f52222 100644
--- a/nova/tests/unit/api/openstack/compute/test_rescue.py
+++ b/nova/tests/unit/api/openstack/compute/test_rescue.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import ddt
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_security_groups.py b/nova/tests/unit/api/openstack/compute/test_security_groups.py
index 71cdcbc871..4a85a9997d 100644
--- a/nova/tests/unit/api/openstack/compute/test_security_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_security_groups.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from neutronclient.common import exceptions as n_exc
+from unittest import mock
+
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
index d07924abe8..08f7a31573 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -13,9 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
@@ -66,11 +67,11 @@ class ServerActionsControllerTestV21(test.TestCase):
self.controller = self._get_controller()
self.compute_api = self.controller.compute_api
- # We don't care about anything getting as far as hitting the compute
- # RPC API so we just mock it out here.
- mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
- mock_rpcapi.start()
- self.addCleanup(mock_rpcapi.stop)
+ # In most of the cases we don't care about anything getting as far as
+ # hitting the compute RPC API so we just mock it out here.
+ patcher_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
+ self.mock_rpcapi = patcher_rpcapi.start()
+ self.addCleanup(patcher_rpcapi.stop)
# The project_id here matches what is used by default in
# fake_compute_get which need to match for policy checks.
self.req = fakes.HTTPRequest.blank('',
@@ -1079,21 +1080,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
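+ # Quiesce being unsupported should not fail the snapshot request;
+ # the assertions below check the snapshot is still created.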
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
if mock_vol_create_side_effect:
mock_vol_create.side_effect = mock_vol_create_side_effect
@@ -1125,7 +1128,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
@@ -1189,21 +1192,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
response = self.controller._action_create_image(self.req,
FAKE_UUID, body=body)
@@ -1218,7 +1223,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py b/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
index d215f3e903..12d8bbb318 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/test_server_external_events.py
index 2ca97fc6d8..e366d0acdd 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_external_events.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_external_events.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures as fx
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_external_events \
@@ -192,7 +193,7 @@ class ServerExternalEventsTestV21(test.NoDBTestCase):
self.api.create, self.req, body=body)
def test_create_unknown_events(self):
- self.event_1['name'] = 'unkown_event'
+ self.event_1['name'] = 'unknown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
index 6b08be6fd9..fe7a60f956 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
@@ -13,14 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
+from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
+from nova import exception
+from nova.limit import local as local_limit
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -116,14 +120,41 @@ class ServerGroupQuotasTestV21(test.TestCase):
self.controller.create,
self.req, body={'server_group': sgroup})
+ def _test_create_server_group_during_recheck(self, mock_method):
+ self._setup_quotas()
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ e = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ self.req, body={'server_group': sgroup})
+ self.assertEqual(2, mock_method.call_count)
+ return e
+
@mock.patch('nova.objects.Quotas.check_deltas')
- def test_create_server_group_recheck_disabled(self, mock_check):
+ def test_create_server_group_during_recheck(self, mock_check):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ # First quota check succeeds, second (recheck) fails.
+ mock_check.side_effect = [None,
+ exception.OverQuota(overs='server_groups')]
+ e = self._test_create_server_group_during_recheck(mock_check)
+ expected = 'Quota exceeded, too many server groups.'
+ self.assertEqual(expected, str(e))
+
+ def _test_create_server_group_recheck_disabled(self):
self.flags(recheck_quota=False, group='quota')
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
self.controller.create(self.req, body={'server_group': sgroup})
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_create_server_group_recheck_disabled(self, mock_check):
+ self._test_create_server_group_recheck_disabled()
ctxt = self.req.environ['nova.context']
mock_check.assert_called_once_with(ctxt, {'server_groups': 1},
ctxt.project_id, ctxt.user_id)
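These recheck tests lean on mock's iterable side_effect: each call consumes the next element, and an exception instance is raised rather than returned. A small standalone sketch of that mechanic:

    from unittest import mock


    class OverQuota(Exception):
        pass


    # First call (initial quota check) passes; second call (the recheck)
    # raises, simulating a parallel request consuming the quota first.
    check = mock.Mock(side_effect=[None, OverQuota('server_groups')])

    check()
    try:
        check()
    except OverQuota as exc:
        print('recheck failed:', exc)

    assert check.call_count == 2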
@@ -170,3 +201,76 @@ class ServerGroupQuotasTestV21(test.TestCase):
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
+
+
+class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
+
+ def setUp(self):
+ super(ServerGroupQuotasUnifiedLimitsTestV21, self).setUp()
+ self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota')
+ self.req = fakes.HTTPRequest.blank('')
+ self.controller = sg_v21.ServerGroupController()
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture({'server_groups': 10}, {}))
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_create_server_group_during_recheck(self, mock_enforce):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ # First quota check succeeds, second (recheck) fails.
+ mock_enforce.side_effect = [
+ None,
+ exception.ServerGroupLimitExceeded(message='oslo.limit message')]
+ # Run the test using the unified limits enforce method.
+ e = self._test_create_server_group_during_recheck(mock_enforce)
+ expected = 'oslo.limit message'
+ self.assertEqual(expected, str(e))
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_create_server_group_recheck_disabled(self, mock_enforce):
+ # Run the test using the unified limits enforce method.
+ self._test_create_server_group_recheck_disabled()
+ ctxt = self.req.environ['nova.context']
+ mock_enforce.assert_called_once_with(ctxt, 'server_groups',
+ entity_scope=ctxt.project_id,
+ delta=1)
+
+ def test_create_group_fails_with_zero_quota(self):
+ self.limit_fixture.reglimits = {'server_groups': 0}
+ sgroup = {'name': 'test', 'policies': ['anti-affinity']}
+ exc = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ self.req, body={'server_group': sgroup})
+ msg = ("Resource %s is over limit" % local_limit.SERVER_GROUPS)
+ self.assertIn(msg, str(exc))
+
+ def test_create_only_one_group_when_limit_is_one(self):
+ self.limit_fixture.reglimits = {'server_groups': 1}
+ policies = ['anti-affinity']
+ sgroup = {'name': 'test', 'policies': policies}
+ res_dict = self.controller.create(
+ self.req, body={'server_group': sgroup})
+ self.assertEqual(res_dict['server_group']['name'], 'test')
+ self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
+ self.assertEqual(res_dict['server_group']['policies'], policies)
+
+    # prove we can't create a second one, as the limit is one
+ sgroup2 = {'name': 'test2', 'policies': policies}
+ exc = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ self.req, body={'server_group': sgroup2})
+ msg = ("Resource %s is over limit" % local_limit.SERVER_GROUPS)
+ self.assertIn(msg, str(exc))
+
+ # delete first one
+ self.controller.delete(self.req, res_dict['server_group']['id'])
+
+ # prove we can now create the second one
+ res_dict2 = self.controller.create(
+ self.req, body={'server_group': sgroup2})
+ self.assertEqual(res_dict2['server_group']['name'], 'test2')
+ self.assertTrue(
+ uuidutils.is_uuid_like(res_dict2['server_group']['id']))
+ self.assertEqual(res_dict2['server_group']['policies'], policies)
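The unified-limits subclass drives quotas through oslo.limit's LimitFixture and mutates its reglimits between assertions. A rough toy equivalent of the fixture's role, with a hypothetical check() helper standing in for Nova's real enforcement path through oslo.limit's Enforcer:

    class FakeLimitFixture:
        """Toy stand-in for oslo_limit.fixture.LimitFixture: it holds the
        registered (default) limits, which tests may mutate in place."""

        def __init__(self, reglimits):
            self.reglimits = reglimits

        def check(self, resource, requested):
            # Hypothetical enforcement helper, for illustration only.
            if requested > self.reglimits.get(resource, 0):
                raise RuntimeError('Resource %s is over limit' % resource)


    limits = FakeLimitFixture({'server_groups': 1})
    limits.check('server_groups', 1)       # within the limit
    limits.reglimits = {'server_groups': 0}
    try:
        limits.check('server_groups', 1)   # now over the limit
    except RuntimeError as exc:
        print(exc)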
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index a0d1712343..9d99c3ae6d 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -14,7 +14,8 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import webob
@@ -86,7 +87,8 @@ class ServerGroupTestV21(test.NoDBTestCase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
- self.req = fakes.HTTPRequest.blank('')
+ self.member_req = fakes.HTTPRequest.member_req('')
+ self.reader_req = fakes.HTTPRequest.reader_req('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -113,20 +115,20 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
- req = fakes.HTTPRequest.blank('', version='2.63')
+ req = fakes.HTTPRequest.member_req('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
@@ -161,7 +163,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
- self.controller.create(self.req, body={'server_group': sgroup})
+ self.controller.create(self.member_req, body={'server_group': sgroup})
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
@@ -288,7 +290,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ reader_req = fakes.HTTPRequest.reader_req(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
@@ -297,7 +299,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertEqual(all, res_dict)
# test as non-admin
- res_dict = self.controller.index(req)
+ res_dict = self.controller.index(reader_req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@@ -346,25 +348,27 @@ class ServerGroupTestV21(test.NoDBTestCase):
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ req = fakes.HTTPRequest.reader_req(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, self.req, uuidsentinel.group)
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ self.controller.show, self.reader_req, uuidsentinel.group)
def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
+ ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID,
+ roles=['member', 'reader'])
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
@@ -378,7 +382,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
@@ -392,7 +396,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
- self.controller.show(self.req, ig_uuid)
+ self.controller.show(self.reader_req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
@@ -405,7 +409,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
@@ -413,99 +417,99 @@ class ServerGroupTestV21(test.NoDBTestCase):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=None)
+ self.controller.create, self.member_req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=body)
+ self.controller.create, self.member_req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
@@ -527,7 +531,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.index(self.admin_req)
# test as non-admin
- self.controller.index(self.req)
+ self.controller.index(self.reader_req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
@@ -597,7 +601,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
- resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
+ resp = self.controller.delete(self.member_req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
@@ -610,7 +614,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- self.req, 'invalid')
+ self.member_req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
@@ -621,7 +625,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
- self.controller.delete(self.req, ig_uuid)
+ self.controller.delete(self.member_req, ig_uuid)
class ServerGroupTestV213(ServerGroupTestV21):
@@ -648,7 +652,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
@@ -673,7 +677,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@@ -689,7 +693,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
@@ -697,7 +701,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertIn("Only anti-affinity policy supports rules", str(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# Negative tests: the key is unknown, or the value is not a
# positive integer
invalid_rules = [{'unknown_key': '3'},
@@ -717,7 +721,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
@@ -733,7 +737,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
@@ -741,14 +745,14 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
@@ -770,7 +774,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_additional_params(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
@@ -785,7 +789,7 @@ class ServerGroupTestV275(ServerGroupTestV264):
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
- req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
- version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('/os-server-groups?dummy=False',
+ version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
index a454597305..9b420dde17 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_server_migrations.py b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
index 8d798d434c..c5d8556751 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
@@ -15,8 +15,8 @@
import copy
import datetime
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_password.py b/nova/tests/unit/api/openstack/compute/test_server_password.py
index e34ceb90e9..2751eee709 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_password.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_password.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import server_password \
as server_password_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
index 3462cf21ac..3a0c9ca1e2 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
index 60d12d0c43..f604652622 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_tags.py b/nova/tests/unit/api/openstack/compute/test_server_tags.py
index b121c75c3a..4e4609d778 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_tags.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_tags.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from nova.api.openstack.compute import server_tags
diff --git a/nova/tests/unit/api/openstack/compute/test_server_topology.py b/nova/tests/unit/api/openstack/compute/test_server_topology.py
index 3d8f6dc908..63d5f7a5c1 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_topology.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_topology.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
index 31739ed7ab..8903de0c3c 100644
--- a/nova/tests/unit/api/openstack/compute/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -17,13 +17,14 @@
import collections
import copy
import datetime
+from unittest import mock
+
import ddt
import functools
from urllib import parse as urlparse
import fixtures
import iso8601
-import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import base64
from oslo_serialization import jsonutils
@@ -2087,10 +2088,10 @@ class ServersControllerTestV216(_ServersControllerTest):
return server_dict
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def _verify_host_status_policy_behavior(self, func, mock_get_host_status):
+ def _verify_host_status_policy_behavior(self, func):
# Set policy to disallow both host_status cases and verify we don't
# call the get_instance_host_status compute RPC API.
+ self.mock_get_instance_host_status.reset_mock()
rules = {
'os_compute_api:servers:show:host_status': '!',
'os_compute_api:servers:show:host_status:unknown-only': '!',
@@ -2098,7 +2099,7 @@ class ServersControllerTestV216(_ServersControllerTest):
orig_rules = policy.get_rules()
policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False)
func()
- mock_get_host_status.assert_not_called()
+ self.mock_get_instance_host_status.assert_not_called()
# Restore the original rules.
policy.set_rules(orig_rules)
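The host_status helper above merges deny-all rules into the live policy set and restores the originals afterwards. A hedged sketch of the overwrite=False merge-and-restore idea; Nova's policy module wraps an oslo.policy Enforcer, so the dict handling below is a simplification:

    from oslo_policy import policy as oslo_policy

    # '!' is oslo.policy's never-matching check, denying both rules.
    deny_rules = oslo_policy.Rules.from_dict({
        'os_compute_api:servers:show:host_status': '!',
        'os_compute_api:servers:show:host_status:unknown-only': '!',
    })

    # Emulate set_rules(..., overwrite=False): merge into the current
    # ruleset while keeping a copy of the original for restoration.
    current = oslo_policy.Rules.from_dict(
        {'os_compute_api:servers:show': ''})  # illustrative ruleset
    saved = dict(current)
    current.update(deny_rules)
    # ... exercise the API, assert the RPC mock was not called ...
    current = oslo_policy.Rules(saved)        # restore the original rules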
@@ -2638,15 +2639,13 @@ class ServersControllerTestV275(ControllerTest):
microversion = '2.75'
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_additional_query_param_old_version(self, mock_get):
+ def test_get_servers_additional_query_param_old_version(self):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
self.controller.index(req)
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_ignore_sort_key_old_version(self, mock_get):
+ def test_get_servers_ignore_sort_key_old_version(self):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=deleted',
use_admin_context=True, version='2.74')
@@ -3584,13 +3583,13 @@ class ServersControllerRebuildTestV263(ControllerTest):
},
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get, certs=None,
- conf_enabled=True, conf_certs=None):
+ def _rebuild_server(self, certs=None, conf_enabled=True, conf_certs=None):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
- vm_state=vm_states.ACTIVE, trusted_certs=certs,
- project_id=self.req_project_id, user_id=self.req_user_id)
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(
+ ctx, vm_state=vm_states.ACTIVE, trusted_certs=certs,
+ project_id=self.req_project_id, user_id=self.req_user_id
+ )
self.flags(default_trusted_certificate_ids=conf_certs, group='glance')
@@ -3743,10 +3742,10 @@ class ServersControllerRebuildTestV271(ControllerTest):
}
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get):
+ def _rebuild_server(self):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, project_id=self.req_project_id,
user_id=self.req_user_id)
server = self.controller._action_rebuild(
@@ -8023,7 +8022,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
version=self.microversion)
def test_get_server_list_detail_with_down_cells(self):
- # Fake out 1 partially constructued instance and one full instance.
+ # Fake out 1 partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
@@ -8151,7 +8150,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_list_with_down_cells(self):
- # Fake out 1 partially constructued instance and one full instance.
+ # Fake out 1 partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
@@ -8203,7 +8202,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_with_down_cells(self):
- # Fake out 1 partially constructued instance.
+ # Fake out 1 partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
@@ -8266,7 +8265,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_without_image_avz_user_id_set_from_down_cells(self):
- # Fake out 1 partially constructued instance.
+ # Fake out 1 partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
diff --git a/nova/tests/unit/api/openstack/compute/test_services.py b/nova/tests/unit/api/openstack/compute/test_services.py
index 5d83bc5a91..f237acc15a 100644
--- a/nova/tests/unit/api/openstack/compute/test_services.py
+++ b/nova/tests/unit/api/openstack/compute/test_services.py
@@ -14,9 +14,9 @@
import copy
import datetime
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
import webob.exc
diff --git a/nova/tests/unit/api/openstack/compute/test_shelve.py b/nova/tests/unit/api/openstack/compute/test_shelve.py
index 68e523be47..bfa8d2d055 100644
--- a/nova/tests/unit/api/openstack/compute/test_shelve.py
+++ b/nova/tests/unit/api/openstack/compute/test_shelve.py
@@ -12,10 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
import ddt
+import fixtures
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import webob
@@ -134,13 +134,17 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
'availability_zone': 'us-east'
}}
self.req.body = jsonutils.dump_as_bytes(body)
- self.req.api_version_request = (api_version_request.
- APIVersionRequest('2.76'))
- with mock.patch.object(self.controller.compute_api,
- 'unshelve') as mock_unshelve:
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.76')
+ )
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve'
+ ) as mock_unshelve:
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
- self.req.environ['nova.context'], instance, new_az=None)
+ self.req.environ['nova.context'],
+ instance,
+ )
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -158,7 +162,9 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
APIVersionRequest('2.76'))
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
- self.req.environ['nova.context'], instance, new_az=None)
+ self.req.environ['nova.context'],
+ instance,
+ )
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -193,6 +199,238 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
'availability_zone': None
}}
self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID, body=body)
+
+ def test_unshelve_with_additional_param(self):
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ 'additional_param': 1
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ exc = self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve, self.req,
+ fakes.FAKE_UUID, body=body)
+ self.assertIn("Additional properties are not allowed", str(exc))
+
+
+class UnshelveServerControllerTestV291(test.NoDBTestCase):
+ """Server controller test for microversion 2.91
+
+    Microversion 2.91 adds a host parameter to unshelve a
+    shelved-offloaded server.
+ """
+ wsgi_api_version = '2.91'
+
+ def setUp(self):
+ super(UnshelveServerControllerTestV291, self).setUp()
+ self.controller = shelve_v21.ShelveController()
+ self.req = fakes.HTTPRequest.blank(
+ '/%s/servers/a/action' % fakes.FAKE_PROJECT_ID,
+ use_admin_context=True, version=self.wsgi_api_version)
+
+ def fake_get_instance(self):
+ ctxt = self.req.environ['nova.context']
+ return fake_instance.fake_instance_obj(
+ ctxt, uuid=fakes.FAKE_UUID, vm_state=vm_states.SHELVED_OFFLOADED)
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_pre_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ before microversion 2.91
+        still works.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.77'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve'
+ ) as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_without_parameters_2_91(self, mock_get_instance):
+        """Make sure omitting all parameters with microversion 2.91
+        works.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': None
+ }
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ with microversion 2.91
+        works.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ host=None,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_none_2_91(self, mock_get_instance):
+        """Make sure setting the AZ to None (unpinning the server)
+        works.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': None,
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az=None,
+ host=None,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_host_2_91(self, mock_get_instance):
+ """Make sure specifying a host with microversion 2.91
+        works.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'host': 'server02',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ host='server02',
+ )
+
+ @mock.patch('nova.compute.api.API.unshelve')
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_and_host_with_v2_91(
+ self, mock_get_instance, mock_unshelve):
+ """Make sure specifying a host and an availability_zone with
+        microversion 2.91 works.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ 'host': 'server01',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ self.controller.compute_api.unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ host='server01',
+ )
+
+ def test_invalid_az_name_with_int(self):
+ body = {
+ 'unshelve': {
+                'availability_zone': 1234
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID,
+ body=body)
+
+ def test_no_az_value(self):
+ body = {
+ 'unshelve': {
+ 'host': None
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID, body=body)
+
+ def test_invalid_host_fqdn_with_int(self):
+ body = {
+ 'unshelve': {
+ 'host': 1234
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID,
+ body=body)
+
+ def test_no_host(self):
+ body = {
+ 'unshelve': {
+ 'host': None
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError,
self.controller._unshelve,
self.req, fakes.FAKE_UUID,
@@ -201,7 +439,7 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
def test_unshelve_with_additional_param(self):
body = {
'unshelve': {
- 'availability_zone': 'us-east',
+ 'host': 'server01',
'additional_param': 1
}}
self.req.body = jsonutils.dump_as_bytes(body)
@@ -209,4 +447,4 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
exception.ValidationError,
self.controller._unshelve, self.req,
fakes.FAKE_UUID, body=body)
- self.assertIn("Additional properties are not allowed", str(exc))
+ self.assertIn("Invalid input for field/attribute unshelve.", str(exc))
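Taken together, the assertions in this file pin down which keyword arguments reach compute_api.unshelve at each microversion. A purely illustrative helper (unshelve_kwargs is a hypothetical name, not Nova's implementation) that reproduces the expected mapping:

    def unshelve_kwargs(microversion, unshelve_body):
        """Return the kwargs the assertions above expect
        compute_api.unshelve to receive; version handling simplified."""
        kwargs = {}
        body = unshelve_body or {}
        if microversion >= (2, 91):
            if 'host' in body:
                kwargs['host'] = body['host']
            if 'availability_zone' in body:
                kwargs['new_az'] = body['availability_zone']
                kwargs.setdefault('host', None)
        elif microversion >= (2, 77):
            if 'availability_zone' in body:
                kwargs['new_az'] = body['availability_zone']
        return kwargs


    assert unshelve_kwargs((2, 91), {'availability_zone': 'us-east'}) == {
        'new_az': 'us-east', 'host': None}
    assert unshelve_kwargs((2, 91), {'host': 'server02'}) == {
        'host': 'server02'}
    assert unshelve_kwargs((2, 77), {'availability_zone': 'us-east'}) == {
        'new_az': 'us-east'}
    assert unshelve_kwargs((2, 76), {'availability_zone': 'us-east'}) == {}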
diff --git a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
index 5794fdf061..a7dcfae558 100644
--- a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
+++ b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_snapshots.py b/nova/tests/unit/api/openstack/compute/test_snapshots.py
index b23ed50865..2e133506a3 100644
--- a/nova/tests/unit/api/openstack/compute/test_snapshots.py
+++ b/nova/tests/unit/api/openstack/compute/test_snapshots.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import volumes as volumes_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_suspend_server.py b/nova/tests/unit/api/openstack/compute/test_suspend_server.py
index 6eeb2b4549..a44297362c 100644
--- a/nova/tests/unit/api/openstack/compute/test_suspend_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_suspend_server.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_tenant_networks.py b/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
index d05c85c508..c6de561b11 100644
--- a/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
+++ b/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index a24c104c93..5b4a2d8b1a 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -15,10 +15,10 @@
# under the License.
import datetime
+from unittest import mock
import urllib
import fixtures
-import mock
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -1889,8 +1889,7 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
req, '5')
def _test_assisted_delete_instance_conflict(self, api_error):
- # unset the stub on volume_snapshot_delete from setUp
- self.mock_volume_snapshot_delete.stop()
+ self.mock_volume_snapshot_delete.side_effect = api_error
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
@@ -1899,10 +1898,9 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
urllib.parse.urlencode(params),
version=self.microversion)
req.method = 'DELETE'
- with mock.patch.object(compute_api.API, 'volume_snapshot_delete',
- side_effect=api_error):
- self.assertRaises(
- webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
+
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
def test_assisted_delete_instance_invalid_state(self):
api_error = exception.InstanceInvalidState(
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 8cf90ddebe..9ac970f787 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -240,6 +240,9 @@ class HTTPRequest(os_wsgi.Request):
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
+ roles = kwargs.pop('roles', [])
+ if use_admin_context:
+ roles.append('admin')
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
@@ -247,10 +250,19 @@ class HTTPRequest(os_wsgi.Request):
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
- is_admin=use_admin_context)
+ is_admin=use_admin_context,
+ roles=roles)
out.api_version_request = api_version.APIVersionRequest(version)
return out
+ @classmethod
+ def member_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['member', 'reader'], **kwargs)
+
+ @classmethod
+ def reader_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['reader'], **kwargs)
+
class HTTPRequestV21(HTTPRequest):
pass
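A short usage sketch of the new role-aware constructors, assuming Nova's test fakes are importable and that the fake context exposes the standard oslo.context roles attribute:

    from nova.tests.unit.api.openstack import fakes

    member_req = fakes.HTTPRequest.member_req('/os-server-groups')
    reader_req = fakes.HTTPRequest.reader_req('/os-server-groups')
    admin_req = fakes.HTTPRequest.blank(
        '/os-server-groups', use_admin_context=True)

    assert member_req.environ['nova.context'].roles == ['member', 'reader']
    assert reader_req.environ['nova.context'].roles == ['reader']
    assert admin_req.environ['nova.context'].is_admin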
diff --git a/nova/tests/unit/api/openstack/test_common.py b/nova/tests/unit/api/openstack/test_common.py
index 4666413e27..7fe98bd52e 100644
--- a/nova/tests/unit/api/openstack/test_common.py
+++ b/nova/tests/unit/api/openstack/test_common.py
@@ -17,7 +17,8 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
-import mock
+from unittest import mock
+
from testtools import matchers
import webob
import webob.exc
diff --git a/nova/tests/unit/api/openstack/test_faults.py b/nova/tests/unit/api/openstack/test_faults.py
index 1bd56a87c5..c7dd5c0a9d 100644
--- a/nova/tests/unit/api/openstack/test_faults.py
+++ b/nova/tests/unit/api/openstack/test_faults.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
import webob
import webob.dec
diff --git a/nova/tests/unit/api/openstack/test_requestlog.py b/nova/tests/unit/api/openstack/test_requestlog.py
index 0ea91439cc..7e79e1b079 100644
--- a/nova/tests/unit/api/openstack/test_requestlog.py
+++ b/nova/tests/unit/api/openstack/test_requestlog.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import fixtures as fx
import testtools
diff --git a/nova/tests/unit/api/openstack/test_wsgi.py b/nova/tests/unit/api/openstack/test_wsgi.py
index e0cf8f6fd8..76554e1fcb 100644
--- a/nova/tests/unit/api/openstack/test_wsgi.py
+++ b/nova/tests/unit/api/openstack/test_wsgi.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
import testscenarios
import webob
diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py
index 247886b9dd..0eb7011c11 100644
--- a/nova/tests/unit/api/openstack/test_wsgi_app.py
+++ b/nova/tests/unit/api/openstack/test_wsgi_app.py
@@ -11,9 +11,9 @@
# under the License.
import tempfile
+from unittest import mock
import fixtures
-import mock
from oslo_config import fixture as config_fixture
from oslotest import base
@@ -104,3 +104,18 @@ document_root = /tmp
'disable_compute_service_check_for_ffu', True,
group='workarounds')
wsgi_app._setup_service('myhost', 'api')
+
+ def test__get_config_files_empty_env(self):
+ env = {}
+ result = wsgi_app._get_config_files(env)
+ expected = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
+ self.assertEqual(result, expected)
+
+ def test__get_config_files_with_env(self):
+ env = {
+ "OS_NOVA_CONFIG_DIR": "/nova",
+ "OS_NOVA_CONFIG_FILES": "api.conf",
+ }
+ result = wsgi_app._get_config_files(env)
+ expected = ['/nova/api.conf']
+ self.assertEqual(result, expected)
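The two tests above fully pin the default and overridden behaviour. A behaviourally equivalent sketch of such a helper; the semicolon separator for OS_NOVA_CONFIG_FILES is an assumption not confirmed by these tests:

    import os


    def get_config_files(env=None):
        """Sketch matching the expectations of the two tests above;
        the real wsgi_app._get_config_files may differ in detail."""
        env = env if env is not None else os.environ
        dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').rstrip('/')
        files = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
        if files == ['']:
            files = ['api-paste.ini', 'nova.conf']
        return [os.path.join(dirname, f) for f in files]


    assert get_config_files({}) == ['/etc/nova/api-paste.ini',
                                    '/etc/nova/nova.conf']
    assert get_config_files({'OS_NOVA_CONFIG_DIR': '/nova',
                             'OS_NOVA_CONFIG_FILES': 'api.conf'}) == [
        '/nova/api.conf']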
diff --git a/nova/tests/unit/api/test_auth.py b/nova/tests/unit/api/test_auth.py
index 3be245b90e..3bc5f51b04 100644
--- a/nova/tests/unit/api/test_auth.py
+++ b/nova/tests/unit/api/test_auth.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_middleware import request_id
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/test_wsgi.py b/nova/tests/unit/api/test_wsgi.py
index b2701dc723..b8f215c730 100644
--- a/nova/tests/unit/api/test_wsgi.py
+++ b/nova/tests/unit/api/test_wsgi.py
@@ -20,8 +20,8 @@ Test WSGI basics and provide some helper functions for other WSGI tests.
"""
import sys
+from unittest import mock
-import mock
import routes
import webob
diff --git a/nova/tests/unit/api/validation/extra_specs/test_validators.py b/nova/tests/unit/api/validation/extra_specs/test_validators.py
index 969fb9b648..a8911aadad 100644
--- a/nova/tests/unit/api/validation/extra_specs/test_validators.py
+++ b/nova/tests/unit/api/validation/extra_specs/test_validators.py
@@ -28,7 +28,7 @@ class TestValidators(test.NoDBTestCase):
"""
namespaces = {
'accel', 'aggregate_instance_extra_specs', 'capabilities', 'hw',
- 'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'powervm', 'quota',
+ 'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'quota',
'resources(?P<group>([a-zA-Z0-9_-]{1,64})?)',
'trait(?P<group>([a-zA-Z0-9_-]{1,64})?)', 'vmware',
}
@@ -74,6 +74,10 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'preferred'),
('hw:pci_numa_affinity_policy', 'socket'),
('hw:cpu_policy', 'mixed'),
+ ('hw:viommu_model', 'auto'),
+ ('hw:viommu_model', 'intel'),
+ ('hw:viommu_model', 'smmuv3'),
+ ('hw:viommu_model', 'virtio'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -92,6 +96,7 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'requird'),
('hw:pci_numa_affinity_policy', 'prefrred'),
('hw:pci_numa_affinity_policy', 'socet'),
+ ('hw:viommu_model', 'autt'),
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
@@ -101,9 +106,7 @@ class TestValidators(test.NoDBTestCase):
valid_specs = (
('hw:numa_nodes', '1'),
('os:monitors', '1'),
- ('powervm:shared_weight', '1'),
('os:monitors', '8'),
- ('powervm:shared_weight', '255'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -113,9 +116,7 @@ class TestValidators(test.NoDBTestCase):
('hw:serial_port_count', '!'), # NaN
('hw:numa_nodes', '0'), # has min
('os:monitors', '0'), # has min
- ('powervm:shared_weight', '-1'), # has min
('os:monitors', '9'), # has max
- ('powervm:shared_weight', '256'), # has max
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
diff --git a/nova/tests/unit/cmd/test_baseproxy.py b/nova/tests/unit/cmd/test_baseproxy.py
index 34f911cd83..25f3905f24 100644
--- a/nova/tests/unit/cmd/test_baseproxy.py
+++ b/nova/tests/unit/cmd/test_baseproxy.py
@@ -13,9 +13,9 @@
# limitations under the License.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
diff --git a/nova/tests/unit/cmd/test_common.py b/nova/tests/unit/cmd/test_common.py
index cabb54f9d4..a32073c297 100644
--- a/nova/tests/unit/cmd/test_common.py
+++ b/nova/tests/unit/cmd/test_common.py
@@ -19,9 +19,9 @@
from io import StringIO
import sys
+from unittest import mock
import fixtures
-import mock
from nova.cmd import common as cmd_common
from nova import exception
diff --git a/nova/tests/unit/cmd/test_compute.py b/nova/tests/unit/cmd/test_compute.py
index acfcea50d2..e465b026aa 100644
--- a/nova/tests/unit/cmd/test_compute.py
+++ b/nova/tests/unit/cmd/test_compute.py
@@ -13,8 +13,8 @@
# limitations under the License.
import contextlib
+from unittest import mock
-import mock
from nova.cmd import compute
from nova import context
diff --git a/nova/tests/unit/cmd/test_manage.py b/nova/tests/unit/cmd/test_manage.py
index 309c2fc829..10c1a77c94 100644
--- a/nova/tests/unit/cmd/test_manage.py
+++ b/nova/tests/unit/cmd/test_manage.py
@@ -17,11 +17,11 @@ import datetime
from io import StringIO
import sys
import textwrap
+from unittest import mock
import warnings
import ddt
import fixtures
-import mock
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -40,7 +40,6 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_requests
-
CONF = conf.CONF
@@ -2945,11 +2944,54 @@ class TestNovaManagePlacement(test.NoDBTestCase):
neutron.update_port.assert_called_once_with(
uuidsentinel.port_id, body=expected_update_body)
- def test_audit_with_wrong_provider_uuid(self):
+ @mock.patch.object(manage.PlacementCommands,
+ '_check_orphaned_allocations_for_provider')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
+ def test_audit_with_provider_uuid(
+ self, get_resource_providers, check_orphaned_allocs,
+ ):
+ rps = [
+ {
+ "generation": 1,
+ "uuid": uuidsentinel.rp1,
+ "links": None,
+ "name": "rp1",
+ "parent_provider_uuid": None,
+ "root_provider_uuid": uuidsentinel.rp1,
+ },
+ ]
+ get_resource_providers.return_value = fake_requests.FakeResponse(
+ 200, content=jsonutils.dumps({"resource_providers": rps}))
+
+ # we found one orphaned allocation per RP and we had no faults
+ check_orphaned_allocs.side_effect = ((1, 0),)
+
+ ret = self.cli.audit(
+ verbose=True, delete=False,
+ provider_uuid=uuidsentinel.fake_uuid)
+
+ # We found orphaned allocations but we left them
+ self.assertEqual(3, ret)
+
+ get_resource_providers.assert_called_once_with(
+ f'/resource_providers?uuid={uuidsentinel.fake_uuid}',
+ global_request_id=mock.ANY,
+ version='1.14')
+
+ # Only the specified RP is checked
+ check_orphaned_allocs.assert_has_calls([
+ mock.call(mock.ANY, mock.ANY, mock.ANY, rps[0], False),
+ ])
+
+ output = self.output.getvalue()
+ self.assertIn('Processed 1 allocation', output)
+
+ def test_audit_with_invalid_provider_uuid(self):
with mock.patch.object(
- self.cli, '_get_resource_provider',
- side_effect=exception.ResourceProviderNotFound(
- name_or_uuid=uuidsentinel.fake_uuid)):
+ self.cli, '_get_resource_provider',
+ side_effect=exception.ResourceProviderNotFound(
+ name_or_uuid=uuidsentinel.fake_uuid),
+ ):
ret = self.cli.audit(
provider_uuid=uuidsentinel.fake_uuid)
self.assertEqual(127, ret)
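For reference, the return codes asserted in these audit tests line up with the nova-manage placement audit CLI; a hedged usage sketch based only on what the tests above exercise (output text illustrative):

    $ nova-manage placement audit --verbose --resource_provider <rp_uuid>
    Processed 1 allocation.
    # exit 0: no orphaned allocations found
    # exit 3: orphans found but left in place (no --delete)
    # exit 4: orphans found and deleted (--delete)
    # exit 127: the given resource provider UUID was not found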
@@ -3005,6 +3047,11 @@ class TestNovaManagePlacement(test.NoDBTestCase):
expected_ret = 0
self.assertEqual(expected_ret, ret)
+ get_resource_providers.assert_called_once_with(
+ '/resource_providers',
+ global_request_id=mock.ANY,
+ version='1.14')
+
call1 = mock.call(mock.ANY, mock.ANY, mock.ANY, rps[0], delete)
call2 = mock.call(mock.ANY, mock.ANY, mock.ANY, rps[1], delete)
if errors:
@@ -3952,3 +3999,262 @@ class LibvirtCommandsTestCase(test.NoDBTestCase):
output = self.output.getvalue()
self.assertEqual(3, ret)
self.assertIn(uuidsentinel.instance, output)
+
+
+class ImagePropertyCommandsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.output = StringIO()
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
+ self.commands = manage.ImagePropertyCommands()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(0, ret, 'return code')
+ self.assertIn('virtio', self.output.getvalue(), 'command output')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock())
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_instance_not_found(
+ self,
+ mock_get_instance
+ ):
+ mock_get_instance.side_effect = exception.InstanceNotFound(
+ instance_id=uuidsentinel.instance)
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_instance_mapping_not_found(
+ self,
+ mock_get_instance_mapping
+ ):
+ mock_get_instance_mapping.side_effect = \
+ exception.InstanceMappingNotFound(
+ uuid=uuidsentinel.instance)
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_image_property_not_found(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='foo')
+ self.assertEqual(3, ret, 'return code')
+
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_unknown_failure(
+ self,
+ mock_get_instance_mapping,
+ ):
+ mock_get_instance_mapping.side_effect = Exception()
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(1, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties(
+ self, mock_instance_save, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ instance = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ mock_get_instance.return_value = instance
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_cdrom_bus=sata']
+ )
+ self.assertEqual(0, ret, 'return code')
+ self.assertIn('image_hw_cdrom_bus', instance.system_metadata)
+ self.assertEqual(
+ 'sata',
+ instance.system_metadata.get('image_hw_cdrom_bus'),
+ 'image_hw_cdrom_bus'
+ )
+ self.assertEqual(
+ 'virtio',
+ instance.system_metadata.get('image_hw_disk_bus'),
+ 'image_hw_disk_bus'
+ )
+ mock_instance_save.assert_called_once()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock())
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_instance_not_found(self, mock_get_instance):
+ mock_get_instance.side_effect = exception.InstanceNotFound(
+ instance_id=uuidsentinel.instance)
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_disk_bus=virtio'])
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_instance_mapping_not_found(
+ self,
+ mock_get_instance_mapping
+ ):
+ mock_get_instance_mapping.side_effect = \
+ exception.InstanceMappingNotFound(
+ uuid=uuidsentinel.instance)
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_disk_bus=virtio'])
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_instance_invalid_state(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.ACTIVE,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_cdrom_bus=sata']
+ )
+ self.assertEqual(3, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_invalid_input(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.SHELVED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_cdrom_bus'])
+ self.assertEqual(4, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_invalid_property_name(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.SHELVED_OFFLOADED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['foo=bar'])
+ self.assertEqual(5, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_invalid_property_value(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_disk_bus=bar'])
+ self.assertEqual(6, ret, 'return code')
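The return codes asserted throughout ImagePropertyCommandsTestCase map onto the nova-manage image_property commands; a hedged usage sketch summarizing the codes these tests pin down:

    $ nova-manage image_property show <instance_uuid> hw_disk_bus
    $ nova-manage image_property set <instance_uuid> --property hw_cdrom_bus=sata
    # common: 0 success, 1 unknown error, 2 instance (mapping) not found
    # show:   3 image property not found
    # set:    3 instance not stopped/shelved, 4 malformed --property,
    #         5 unknown image property name, 6 invalid property value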
diff --git a/nova/tests/unit/cmd/test_nova_api.py b/nova/tests/unit/cmd/test_nova_api.py
index f13712eabd..a4f7d82105 100644
--- a/nova/tests/unit/cmd/test_nova_api.py
+++ b/nova/tests/unit/cmd/test_nova_api.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.cmd import api
from nova import config
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index 4c990a8ff1..29dd5610f6 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -18,9 +18,9 @@
"""
from io import StringIO
+from unittest import mock
import fixtures
-import mock
from nova.cmd import policy
import nova.conf
@@ -128,20 +128,21 @@ class TestPolicyCheck(test.NoDBTestCase):
self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
- context = nova_context.RequestContext()
- rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER]
+ context = nova_context.RequestContext(roles=['reader'])
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
- self._check_filter_rules()
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context)
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
@@ -150,13 +151,15 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- self._check_filter_rules(target=instance)
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context, target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
- project_id='fake-project')
+ project_id='fake-project',
+ roles=['reader'])
instance = fake_instance.fake_instance_obj(db_context)
- rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(db_context, instance, expected_rules)
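A minimal sketch of the role-aware contexts the updated assertions build: with the move from PROJECT_READER_OR_SYSTEM_READER to PROJECT_READER_OR_ADMIN defaults, the roles list on the RequestContext decides which rules pass, so the tests now set it explicitly:

    from nova import context as nova_context

    # reader-scoped: passes PROJECT_READER_OR_ADMIN rules for its project
    reader_ctxt = nova_context.RequestContext(
        user_id='fake-user', project_id='fake-project', roles=['reader'])
    # admin-scoped: passes admin-only rules as well
    admin_ctxt = nova_context.RequestContext(roles=['admin'])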
diff --git a/nova/tests/unit/cmd/test_scheduler.py b/nova/tests/unit/cmd/test_scheduler.py
index e207c7343f..2927492abc 100644
--- a/nova/tests/unit/cmd/test_scheduler.py
+++ b/nova/tests/unit/cmd/test_scheduler.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.cmd import scheduler
from nova import config
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index d0343212f2..f5fcc168ee 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -18,13 +18,12 @@ Unit tests for the nova-status CLI interfaces.
# NOTE(cdent): Additional tests of nova-status may be found in
# nova/tests/functional/test_nova_status.py. Those tests use the external
-# PlacementFixture, which is only available in functioanl tests.
+# PlacementFixture, which is only available in functional tests.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
-
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as keystone
from keystoneauth1 import session
@@ -40,7 +39,6 @@ from nova import exception
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import service
-from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -394,60 +392,6 @@ class TestUpgradeCheckCinderAPI(test.NoDBTestCase):
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
-class TestUpgradeCheckPolicy(test.NoDBTestCase):
-
- new_default_status = upgradecheck.Code.WARNING
-
- def setUp(self):
- super(TestUpgradeCheckPolicy, self).setUp()
- self.cmd = status.UpgradeCommands()
- self.rule_name = "system_admin_api"
-
- def tearDown(self):
- super(TestUpgradeCheckPolicy, self).tearDown()
- # Check if policy is reset back after the upgrade check
- self.assertIsNone(policy._ENFORCER)
-
- def test_policy_rule_with_new_defaults(self):
- new_default = "role:admin and system_scope:all"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
- self.assertEqual(self.new_default_status,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_old_defaults(self):
- new_default = "is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_both_defaults(self):
- new_default = "(role:admin and system_scope:all) or is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_checks_with_fresh_init_and_no_policy_override(self):
- self.policy = self.useFixture(nova_fixtures.OverridePolicyFixture(
- rules_in_file={}))
- policy.reset()
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
-
-class TestUpgradeCheckPolicyEnableScope(TestUpgradeCheckPolicy):
-
- new_default_status = upgradecheck.Code.SUCCESS
-
- def setUp(self):
- super(TestUpgradeCheckPolicyEnableScope, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
-
class TestUpgradeCheckOldCompute(test.NoDBTestCase):
def setUp(self):
@@ -474,7 +418,7 @@ class TestUpgradeCheckOldCompute(test.NoDBTestCase):
"nova.objects.service.get_minimum_version_all_cells",
return_value=too_old):
result = self.cmd._check_old_computes()
- self.assertEqual(upgradecheck.Code.WARNING, result.code)
+ self.assertEqual(upgradecheck.Code.FAILURE, result.code)
class TestCheckMachineTypeUnset(test.NoDBTestCase):
diff --git a/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py b/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
index aed34ea30c..a563a7e346 100644
--- a/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
+++ b/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
@@ -15,7 +15,7 @@
"""Tests for Compute Driver CPU resource monitor."""
-import mock
+from unittest import mock
from nova.compute.monitors.cpu import virt_driver
from nova import objects
diff --git a/nova/tests/unit/compute/monitors/test_monitors.py b/nova/tests/unit/compute/monitors/test_monitors.py
index 34b4a34d20..d43f90206c 100644
--- a/nova/tests/unit/compute/monitors/test_monitors.py
+++ b/nova/tests/unit/compute/monitors/test_monitors.py
@@ -15,7 +15,7 @@
"""Tests for resource monitors."""
-import mock
+from unittest import mock
from nova.compute import monitors
from nova import test
diff --git a/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml b/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml
index 278b77cae6..ac0b61a207 100644
--- a/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml
+++ b/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml
@@ -31,7 +31,7 @@ property__source_file_present_value:
schema_version: '1.0'
__source_file: "present"
expected_messages:
- - "{} is not allowed for"
+ - "should not be valid under {}"
- "validating 'not' in schema['properties']['__source_file']"
property__source_file_present_null:
config:
@@ -39,7 +39,7 @@ property__source_file_present_null:
schema_version: '1.0'
__source_file: null
expected_messages:
- - "{} is not allowed for"
+ - "should not be valid under {}"
- "validating 'not' in schema['properties']['__source_file']"
provider_invalid_uuid:
config:
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index 64064cf636..9d6e9ba4bd 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -15,12 +15,13 @@
import contextlib
import datetime
+from unittest import mock
import ddt
import fixtures
import iso8601
-import mock
import os_traits as ot
+from oslo_limit import exception as limit_exceptions
from oslo_messaging import exceptions as oslo_exceptions
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
@@ -42,6 +43,7 @@ from nova import context
from nova.db.main import api as db
from nova import exception
from nova.image import glance as image_api
+from nova.limit import placement as placement_limit
from nova.network import constants
from nova.network import model
from nova.network import neutron as neutron_api
@@ -206,6 +208,10 @@ class _ComputeAPIUnitTestMixIn(object):
list_obj.obj_reset_changes()
return list_obj
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch('nova.objects.Quotas.check_deltas')
@mock.patch('nova.conductor.conductor_api.ComputeTaskAPI.build_instances')
@mock.patch('nova.compute.api.API._record_action_start')
@@ -521,6 +527,36 @@ class _ComputeAPIUnitTestMixIn(object):
instance, fake_bdm)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
+ @mock.patch.object(
+ objects.BlockDeviceMapping, 'get_by_volume_and_instance')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_volume')
+ def test_attach_volume_reserve_bdm_timeout(
+ self, mock_get_by_volume, mock_get_by_volume_and_instance,
+ mock_reserve):
+ mock_get_by_volume.side_effect = exception.VolumeBDMNotFound(
+ volume_id='fake-volume-id')
+
+ fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
+ mock_get_by_volume_and_instance.return_value = fake_bdm
+ instance = self._create_instance_obj()
+ volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
+ None, None, None, None, None)
+
+ mock_reserve.side_effect = oslo_exceptions.MessagingTimeout()
+
+ mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
+ mock.MagicMock(spec=cinder.API))
+
+ with mock_volume_api as mock_v_api:
+ mock_v_api.get.return_value = volume
+ self.assertRaises(oslo_exceptions.MessagingTimeout,
+ self.compute_api.attach_volume,
+ self.context, instance, volume['id'])
+ mock_get_by_volume_and_instance.assert_called_once_with(
+ self.context, volume['id'], instance.uuid)
+ fake_bdm.destroy.assert_called_once_with()
+
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_volume')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
def test_attach_volume_attachment_create_fails(
@@ -931,6 +967,31 @@ class _ComputeAPIUnitTestMixIn(object):
return snapshot_id
+ def _test_delete(self, delete_type, **attrs):
+ delete_time = datetime.datetime(
+ 1955, 11, 5, 9, 30, tzinfo=iso8601.UTC)
+ timeutils.set_time_override(delete_time)
+ self.addCleanup(timeutils.clear_time_override)
+
+ with test.nested(
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'confirm_resize'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'terminate_instance'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'soft_delete_instance'),
+ ) as (
+ mock_confirm, mock_terminate, mock_soft_delete
+ ):
+ self._do_delete(
+ delete_type,
+ mock_confirm,
+ mock_terminate,
+ mock_soft_delete,
+ delete_time,
+ **attrs
+ )
+
@mock.patch.object(compute_utils,
'notify_about_instance_action')
@mock.patch.object(objects.Migration, 'get_by_instance_and_status')
@@ -950,12 +1011,13 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=[])
@mock.patch.object(objects.Instance, 'save')
- def _test_delete(self, delete_type, mock_save, mock_bdm_get, mock_elevated,
- mock_get_cn, mock_up, mock_record, mock_inst_update,
- mock_deallocate, mock_inst_meta, mock_inst_destroy,
- mock_notify_legacy, mock_get_inst,
- mock_save_im, mock_image_delete, mock_mig_get,
- mock_notify, **attrs):
+ def _do_delete(
+ self, delete_type, mock_confirm, mock_terminate, mock_soft_delete,
+ delete_time, mock_save, mock_bdm_get, mock_elevated, mock_get_cn,
+ mock_up, mock_record, mock_inst_update, mock_deallocate,
+ mock_inst_meta, mock_inst_destroy, mock_notify_legacy, mock_get_inst,
+ mock_save_im, mock_image_delete, mock_mig_get, mock_notify, **attrs
+ ):
expected_save_calls = [mock.call()]
expected_record_calls = []
expected_elevated_calls = []
@@ -965,17 +1027,11 @@ class _ComputeAPIUnitTestMixIn(object):
deltas = {'instances': -1,
'cores': -inst.flavor.vcpus,
'ram': -inst.flavor.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.UTC)
- self.useFixture(utils_fixture.TimeFixture(delete_time))
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
- rpcapi = self.compute_api.compute_rpcapi
- mock_confirm = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'confirm_resize')).mock
def _reset_task_state(context, instance, migration, src_host,
cast=False):
@@ -990,11 +1046,6 @@ class _ComputeAPIUnitTestMixIn(object):
snapshot_id = self._set_delete_shelved_part(inst,
mock_image_delete)
- mock_terminate = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock
- mock_soft_delete = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'soft_delete_instance')).mock
-
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
@@ -1082,7 +1133,7 @@ class _ComputeAPIUnitTestMixIn(object):
mock_mig_get.assert_called_once_with(
self.context, instance_uuid, 'finished')
mock_confirm.assert_called_once_with(
- self.context, inst, migration, migration['source_compute'],
+ self.context, inst, migration, migration.source_compute,
cast=False)
if instance_host is not None:
mock_get_cn.assert_called_once_with(self.context,
@@ -1203,10 +1254,12 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.notify_about_instance_usage')
@mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.compute.api.API._record_action_start')
@mock.patch('nova.compute.api.API._local_delete')
def test_delete_error_state_with_no_host(
- self, mock_local_delete, mock_service_get, _mock_notify,
- _mock_save, mock_bdm_get, mock_lookup, _mock_del_booting):
+ self, mock_local_delete, mock_record, mock_service_get,
+ _mock_notify, _mock_save, mock_bdm_get, mock_lookup,
+ _mock_del_booting):
# Instance in error state with no host should be a local delete
# for non API cells
inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
@@ -1218,6 +1271,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_terminate.assert_not_called()
mock_service_get.assert_not_called()
@@ -1317,10 +1372,6 @@ class _ComputeAPIUnitTestMixIn(object):
self.context, instance_uuid, constraint='constraint',
hard_delete=False)
- def _fake_do_delete(context, instance, bdms,
- rservations=None, local=False):
- pass
-
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(cinder.API, 'detach')
@@ -1342,9 +1393,11 @@ class _ComputeAPIUnitTestMixIn(object):
mock_elevated.return_value = self.context
mock_detach.side_effect = exception.VolumeNotFound('volume_id')
+        # A no-op lambda is passed as the delete function that
+        # compute_api._local_delete invokes.
self.compute_api._local_delete(self.context, inst, bdms,
'delete',
- self._fake_do_delete)
+ lambda *args, **kwargs: None)
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
@@ -1380,8 +1433,11 @@ class _ComputeAPIUnitTestMixIn(object):
inst._context = self.context
mock_elevated.return_value = self.context
bdms = []
+        # A no-op lambda is passed as the delete function that
+        # compute_api._local_delete invokes.
self.compute_api._local_delete(self.context, inst, bdms,
- 'delete', self._fake_do_delete)
+ 'delete',
+ lambda *args, **kwargs: None)
mock_del_arqs.assert_called_once_with(self.context, inst)
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@@ -1817,6 +1873,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.context, objects.Migration(),
test_migration.fake_db_migration())
fake_reqspec = objects.RequestSpec()
+ fake_reqspec.is_bfv = False
fake_reqspec.flavor = fake_inst.flavor
fake_numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
@@ -2036,7 +2093,8 @@ class _ComputeAPIUnitTestMixIn(object):
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if request_spec:
- fake_spec = objects.RequestSpec()
+ fake_spec = objects.RequestSpec(
+ pci_requests=objects.InstancePCIRequests(requests=[]))
if requested_destination:
cell1 = objects.CellMapping(uuid=uuids.cell1, name='cell1')
fake_spec.requested_destination = objects.Destination(
@@ -2176,6 +2234,8 @@ class _ComputeAPIUnitTestMixIn(object):
def test_resize_allow_cross_cell_resize_true(self):
self._test_resize(allow_cross_cell_resize=True)
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch('nova.compute.flavors.get_flavor_by_flavor_id')
@@ -2391,6 +2451,8 @@ class _ComputeAPIUnitTestMixIn(object):
do_test()
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch.object(objects.Instance, 'save')
@@ -2449,6 +2511,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_record.assert_not_called()
mock_resize.assert_not_called()
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
@@ -2476,6 +2540,8 @@ class _ComputeAPIUnitTestMixIn(object):
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
@@ -2509,6 +2575,32 @@ class _ComputeAPIUnitTestMixIn(object):
else:
self.fail("Exception not raised")
+ @mock.patch.object(placement_limit, 'enforce_num_instances_and_flavor')
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
+ @mock.patch('nova.servicegroup.api.API.service_is_up',
+ new=mock.Mock(return_value=True))
+ @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
+ def test_resize_instance_quota_exceeds_with_multiple_resources_ul(
+ self, mock_get_flavor, mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ mock_enforce.side_effect = limit_exceptions.ProjectOverLimit(
+ self.context.project_id, [limit_exceptions.OverLimitInfo(
+ resource_name='servers', limit=1, current_usage=1, delta=1)])
+ mock_get_flavor.return_value = self._create_flavor(id=333,
+ vcpus=3,
+ memory_mb=1536)
+
+ self.assertRaises(limit_exceptions.ProjectOverLimit,
+ self.compute_api.resize,
+ self.context, self._create_instance_obj(),
+ 'fake_flavor_id')
+
+ mock_get_flavor.assert_called_once_with('fake_flavor_id',
+ read_deleted="no")
+ mock_enforce.assert_called_once_with(
+ self.context, "fake", mock_get_flavor.return_value, False, 1, 1)
+
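The UnifiedLimitsDriver enabled above via self.flags() corresponds to this nova.conf stanza; with it, resize requests are checked through oslo.limit and can raise ProjectOverLimit:

    [quota]
    driver = nova.quota.UnifiedLimitsDriver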
# TODO(huaqiang): Remove in Wallaby
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@@ -2565,9 +2657,6 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi = self.compute_api.compute_rpcapi
- mock_pause = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock
-
with mock.patch.object(rpcapi, 'pause_instance') as mock_pause:
self.compute_api.pause(self.context, instance)
@@ -3346,7 +3435,7 @@ class _ComputeAPIUnitTestMixIn(object):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance['uuid'], reason='unsupported')
if quiesce_fails:
- raise oslo_exceptions.MessagingTimeout('quiece timeout')
+ raise oslo_exceptions.MessagingTimeout('quiesce timeout')
quiesced[0] = True
def fake_unquiesce_instance(context, instance, mapping=None):
@@ -3407,7 +3496,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda',
'destination_type': 'volume', 'delete_on_termination': False,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': None, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
limits_patcher = mock.patch.object(
self.compute_api.volume_api, 'get_absolute_limits',
@@ -3470,7 +3561,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': None,
'device_name': '/dev/vdh',
'destination_type': 'local', 'delete_on_termination': True,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': False, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
quiesced = [False, False]
@@ -3508,7 +3601,7 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_snapshot_volume_backed(quiesce_required=True,
quiesce_unsupported=False)
- def test_snaphost_volume_backed_with_quiesce_failure(self):
+ def test_snapshot_volume_backed_with_quiesce_failure(self):
self.assertRaises(oslo_exceptions.MessagingTimeout,
self._test_snapshot_volume_backed,
quiesce_required=True,
@@ -3915,6 +4008,158 @@ class _ComputeAPIUnitTestMixIn(object):
_checks_for_create_and_rebuild.assert_called_once_with(
self.context, None, image, flavor, {}, [], None)
+ @ddt.data(True, False)
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed(self, reimage_boot_vol,
+ _record_action_start, _checks_for_create_and_rebuild,
+ _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where the instance is volume backed and we rebuild
+        with the following cases:
+
+ 1) reimage_boot_volume=True
+ 2) reimage_boot_volume=False
+
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ flavor = instance.get_flavor()
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm), \
+ mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ if reimage_boot_vol:
+ self.compute_api.rebuild(self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=True)
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ image_ref=uuids.image_ref,
+ orig_image_ref=None, orig_sys_metadata={},
+ injected_files=[], bdms=bdms,
+ preserve_ephemeral=False, host=None,
+ request_spec=fake_spec,
+ reimage_boot_volume=True,
+ target_state=None)
+ _check_auto_disk_config.assert_called_once_with(
+ image=image, auto_disk_config=None)
+ _checks_for_create_and_rebuild.assert_called_once_with(
+ self.context, None, image, flavor, {}, [], root_bdm)
+ mock_get_bdms.assert_called_once_with(
+ self.context, instance.uuid)
+ else:
+ self.assertRaises(
+ exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False,
+ target_state=None)
+
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed_fails(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+        """Test a scenario where reimaging of the boot volume is not
+        requested, which must fail for a volume-backed instance.
+        """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm):
+ self.assertRaises(exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False,
+ target_state=None)
+
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@@ -3963,7 +4208,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4036,7 +4282,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4104,7 +4351,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4163,7 +4411,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4227,7 +4476,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4329,6 +4579,8 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Quotas.get_all_by_project_and_user',
new=mock.MagicMock())
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.objects.Quotas.count_as_dict')
@mock.patch('nova.objects.Quotas.limit_check_project_and_user')
@mock.patch('nova.objects.Instance.save')
@@ -4371,6 +4623,8 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Quotas.get_all_by_project_and_user',
new=mock.MagicMock())
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.objects.Quotas.count_as_dict')
@mock.patch('nova.objects.Quotas.limit_check_project_and_user')
@mock.patch('nova.objects.Instance.save')
@@ -4740,7 +4994,7 @@ class _ComputeAPIUnitTestMixIn(object):
def test_validate_vol_az_for_create_vol_az_matches_default_cpu_az(self):
"""Tests the scenario that the instance is not being created in a
specific zone and the volume's zone matches
- CONF.default_availabilty_zone so None is returned indicating the
+ CONF.default_availability_zone so None is returned indicating the
RequestSpec.availability_zone does not need to be updated.
"""
self.flags(cross_az_attach=False, group='cinder')
@@ -5548,7 +5802,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5600,6 +5857,7 @@ class _ComputeAPIUnitTestMixIn(object):
# Assert that the instance task state as set in the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5608,7 +5866,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5616,6 +5875,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5653,6 +5918,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+ # Assert that the instance task state as set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+            # Assert that any attempt to rescue a bfv instance using a
+            # rescue image that does not set the hw_rescue_device and
+            # hw_rescue_bus properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
@@ -5837,6 +6220,41 @@ class _ComputeAPIUnitTestMixIn(object):
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
+ def test_update_ephemeral_encryption_bdms(self):
+ flavor = self._create_flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': True,
+ 'hw:ephemeral_encryption_format': 'luks',
+ }
+ )
+ block_device_mapping = [
+ {'device_name': '/dev/sda1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': uuids.snapshot_id,
+ 'delete_on_termination': False,
+ 'boot_index': 0},
+ {'device_name': '/dev/sdb2',
+ 'source_type': 'image', 'destination_type': 'local',
+ 'image_id': uuids.image_id, 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': 'ext3', 'delete_on_termination': False}]
+
+ block_device_mapping = (
+ block_device_obj.block_device_make_list_from_dicts(
+ self.context,
+ map(fake_block_device.AnonFakeDbBlockDeviceDict,
+ block_device_mapping)))
+
+ self.compute_api._update_ephemeral_encryption_bdms(
+ flavor, {}, block_device_mapping)
+
+ for bdm in block_device_mapping:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ else:
+ self.assertFalse(bdm.encrypted)
+
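A hedged sketch of the behaviour the loop above asserts for _update_ephemeral_encryption_bdms (internals assumed from the test, not taken from the implementation): only local-destination BDMs inherit the flavor's ephemeral encryption settings:

    # assumed effect of the helper under test
    for bdm in block_device_mapping:
        if bdm.is_local:
            bdm.encrypted = True            # from hw:ephemeral_encryption
            bdm.encryption_format = 'luks'  # hw:ephemeral_encryption_format
        # volume-destination BDMs are left unencrypted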
def test_bdm_validate_set_size_and_instance(self):
swap_size = 42
ephemeral_size = 24
@@ -6296,8 +6714,9 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(uuids.inst, result.uuid)
mock_get_inst.assert_called_once()
+ @mock.patch('nova.compute.api.LOG.exception')
@mock.patch.object(objects.Instance, 'get_by_uuid')
- def test_get_instance_from_cell_failure(self, mock_get_inst):
+ def test_get_instance_from_cell_failure(self, mock_get_inst, mock_log_exp):
# Make sure InstanceNotFound is bubbled up and not treated like
# other errors
mock_get_inst.side_effect = exception.InstanceNotFound(
@@ -6310,6 +6729,15 @@ class _ComputeAPIUnitTestMixIn(object):
self.compute_api._get_instance_from_cell, self.context,
im, [], False)
self.assertIn('could not be found', str(exp))
+        # Make sure other unexpected NovaExceptions are logged for debugging
+ mock_get_inst.side_effect = exception.NovaException()
+ exp = self.assertRaises(
+ exception.NovaException, self.compute_api._get_instance_from_cell,
+ self.context, im, [], False)
+ msg = (f'Cell {cell_mapping.uuid} is not responding or returned an '
+ 'exception, hence instance info is not available.')
+ self.assertIn(msg, str(exp))
+ mock_log_exp.assert_called_once_with(mock_get_inst.side_effect)
@mock.patch('nova.compute.api.API._save_user_id_in_instance_mapping')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@@ -7070,7 +7498,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
# be no conflict.
self.compute_api._validate_numa_rebuild(instance, image, flavor)
- def test__validate_numa_rebuild_add_numa_toplogy(self):
+ def test__validate_numa_rebuild_add_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that requests a NUMA topology when the original instance did not
have a NUMA topology is invalid.
@@ -7093,7 +7521,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
exception.ImageNUMATopologyRebuildConflict,
self.compute_api._validate_numa_rebuild, instance, image, flavor)
- def test__validate_numa_rebuild_remove_numa_toplogy(self):
+ def test__validate_numa_rebuild_remove_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that does not request a NUMA topology when the original image did
is invalid if it would alter the instances topology as a result.
@@ -7124,7 +7552,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.compute_api._validate_numa_rebuild, instance,
image, flavor)
- def test__validate_numa_rebuild_alter_numa_toplogy(self):
+ def test__validate_numa_rebuild_alter_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that requests a different NUMA topology than the original image
is invalid.
@@ -7159,57 +7587,6 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
image, flavor)
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_image_and_flavor_conflict(self, mock_request):
- """Tests that calling _validate_flavor_image_nostatus()
- with an image that conflicts with the flavor raises but no
- exception is raised if there is no conflict.
- """
- image = {'id': uuids.image_id, 'status': 'foo',
- 'properties': {'hw_pmu': False}}
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "true"})
- self.assertRaises(
- exception.ImagePMUConflict,
- self.compute_api._validate_flavor_image_nostatus,
- self.context, image, flavor, None)
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_image_and_flavor_same_value(self, mock_request):
- # assert that if both the image and flavor are set to the same value
- # no exception is raised and the function returns nothing.
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "true"})
-
- image = {'id': uuids.image_id, 'status': 'foo',
- 'properties': {'hw_pmu': True}}
- self.assertIsNone(self.compute_api._validate_flavor_image_nostatus(
- self.context, image, flavor, None))
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_image_only(self, mock_request):
- # assert that if only the image metadata is set then it is valid
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={})
-
- # ensure string to bool conversion works for image metadata
- # property by using "yes".
- image = {'id': uuids.image_id, 'status': 'foo',
- 'properties': {'hw_pmu': "yes"}}
- self.assertIsNone(self.compute_api._validate_flavor_image_nostatus(
- self.context, image, flavor, None))
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_flavor_only(self, mock_request):
- # assert that if only the flavor extra_spec is set then it is valid
- # and test the string to bool conversion of "on" works.
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "on"})
-
- image = {'id': uuids.image_id, 'status': 'foo', 'properties': {}}
- self.assertIsNone(self.compute_api._validate_flavor_image_nostatus(
- self.context, image, flavor, None))
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
def test_pci_validated(self, mock_request):
"""Tests that calling _validate_flavor_image_nostatus() with
validate_pci=True results in get_pci_requests_from_flavor() being
@@ -7243,6 +7620,37 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
requested_networks)
mock_get.assert_called_once_with(self.context, ['nova-compute'])
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=60)
+ def test_check_support_vnic_remote_managed_version_before_61(
+ self, mock_get):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=uuids.port)])
+ self.assertRaisesRegex(exception.ForbiddenWithRemoteManagedPorts,
+ 'Remote-managed ports are not supported until an upgrade is fully'
+ ' finished.',
+ self.compute_api._check_support_vnic_remote_managed,
+ self.context,
+ requested_networks)
+ mock_get.assert_called_once_with(self.context, ['nova-compute'])
+
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=61)
+ def test_check_support_vnic_remote_managed_version_61(self, mock_get):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=uuids.port)])
+ self.compute_api._check_support_vnic_remote_managed(self.context,
+ requested_networks)
+ mock_get.assert_called_once_with(self.context, ['nova-compute'])
+
def test_validate_and_build_base_options_translate_neutron_secgroup(self):
"""Tests that _check_requested_secgroups will return a uuid for a
requested Neutron security group and that will be returned from
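The two new tests above pin down the service-version gate for remote-managed ports: with a minimum nova-compute version of 60 the check raises, while at 61 it passes silently. A minimal sketch of the gating pattern they exercise; only the mocked entry points are confirmed by the tests, the helper body itself is an assumption:

    # hypothetical body of _check_support_vnic_remote_managed; names are
    # taken from the mocks above, the control flow is an assumption
    def _check_support_vnic_remote_managed(self, context, requested_networks):
        if requested_networks and any(
                self.network_api.is_remote_managed_port(context, r.port_id)
                for r in requested_networks if r.port_id):
            minver = objects.service.get_minimum_version_all_cells(
                context, ['nova-compute'])
            if minver < 61:  # the tests gate on 60 (raise) vs 61 (pass)
                raise exception.ForbiddenWithRemoteManagedPorts()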
@@ -7638,8 +8046,9 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.Instance, 'destroy')
+ @mock.patch('nova.compute.api.API._record_action_start')
def _test_delete_volume_backed_instance(
- self, vm_state, mock_instance_destroy, bdm_destroy,
+ self, vm_state, mock_record, mock_instance_destroy, bdm_destroy,
notify_about_instance_usage, mock_save, mock_elevated,
bdm_get_by_instance_uuid, mock_lookup, _mock_del_booting,
notify_about_instance_action):
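Note the argument threading in this hunk: mock.patch decorators apply bottom-up, so the newly added _record_action_start patch, sitting closest to the function, becomes the first injected mock after the explicit parameters. A toy illustration of that ordering rule:

    from unittest import mock

    @mock.patch('os.getcwd')   # outermost decorator -> last mock argument
    @mock.patch('os.listdir')  # innermost decorator -> first mock argument
    def show_order(mock_listdir, mock_getcwd):
        # each parameter receives the MagicMock for the matching target
        assert mock_listdir is not mock_getcwd

    show_order()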
@@ -7668,6 +8077,8 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
'detach') as mock_detach:
self.compute_api.delete(self.context, inst)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_deallocate.assert_called_once_with(self.context, inst)
mock_detach.assert_called_once_with(self.context, volume_id,
inst.uuid)
@@ -7685,16 +8096,13 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertTrue(hasattr(self.compute_api, 'host'))
self.assertEqual(CONF.host, self.compute_api.host)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per API class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.compute_api._placementclient)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.compute_api.placementclient
+ self.assertFalse(mock_report_client.called)
+ self.compute_api.placementclient
mock_report_client.assert_called_once_with()
def test_validate_host_for_cold_migrate_same_host_fails(self):
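Both placement-client tests (here and in ComputeAPIAggrTestCase below) now assert the same contract: nothing is built at construction time, and the first property access delegates to report_client_singleton(). A minimal sketch of such a lazy module-level singleton, with names assumed from the mocked path (the real helper lives in nova.scheduler.client.report):

    import threading

    class SchedulerReportClient:      # stand-in for the real client
        pass

    _client = None
    _lock = threading.Lock()

    def report_client_singleton():
        # build the expensive client once per process, on first use
        global _client
        if _client is None:
            with _lock:
                if _client is None:
                    _client = SchedulerReportClient()
        return _client

    class API:
        @property
        def placementclient(self):
            # no per-instance caching; the module-level singleton does it
            return report_client_singleton()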
@@ -7966,7 +8374,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=54)
def test_block_accelerators_until_service(self, mock_get_min):
- """Support operating server with acclerators until compute service
+ """Support operating server with accelerators until compute service
more than the version of 53.
"""
extra_specs = {'accel:device_profile': 'mydp'}
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 8997511e73..dcdef56fbe 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -15,9 +15,9 @@
"""Tests for resource tracker claims."""
+from unittest import mock
import uuid
-import mock
from nova.compute import claims
from nova import context
@@ -169,7 +169,8 @@ class ClaimTestCase(test.NoDBTestCase):
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
@@ -181,7 +182,8 @@ class ClaimTestCase(test.NoDBTestCase):
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index f65f1abdb7..36bcd368dc 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -22,10 +22,10 @@ import fixtures as std_fixtures
from itertools import chain
import operator
import sys
+from unittest import mock
from castellan import key_manager
import ddt
-import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -168,7 +168,7 @@ class BaseTestCase(test.TestCase):
'uuid': uuids.fake_compute_node,
'vcpus_used': 0,
'deleted': 0,
- 'hypervisor_type': 'powervm',
+ 'hypervisor_type': 'libvirt',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
@@ -178,7 +178,7 @@ class BaseTestCase(test.TestCase):
'current_workload': 0,
'vcpus': 16,
'mapped': 1,
- 'cpu_info': 'ppc64,powervm,3940',
+ 'cpu_info': 'ppc64,libvirt,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
@@ -1389,13 +1389,14 @@ class ComputeVolumeTestCase(BaseTestCase):
@mock.patch.object(nova.virt.block_device, 'convert_snapshots')
@mock.patch.object(nova.virt.block_device, 'convert_volumes')
@mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_local_images')
@mock.patch.object(nova.virt.block_device, 'convert_swap')
@mock.patch.object(nova.virt.block_device, 'attach_block_devices')
def test_prep_block_device_with_blanks(self, attach_block_devices,
- convert_swap, convert_ephemerals,
- convert_volumes, convert_snapshots,
- convert_images, convert_blanks,
- get_swap):
+ convert_swap, convert_local_images,
+ convert_ephemerals, convert_volumes,
+ convert_snapshots, convert_images,
+ convert_blanks, get_swap):
instance = self._create_fake_instance_obj()
instance['root_device_name'] = '/dev/vda'
root_volume = objects.BlockDeviceMapping(
@@ -1426,6 +1427,7 @@ class ComputeVolumeTestCase(BaseTestCase):
return bdm
convert_swap.return_value = []
+ convert_local_images.return_value = []
convert_ephemerals.return_value = []
convert_volumes.return_value = [blank_volume1, blank_volume2]
convert_snapshots.return_value = []
@@ -1438,6 +1440,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'root_device_name': '/dev/vda',
'swap': [],
'ephemerals': [],
+ 'image': [],
'block_device_mapping': bdms
}
@@ -1452,6 +1455,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertIsNotNone(bdm.device_name)
convert_swap.assert_called_once_with(bdms)
+ convert_local_images.assert_called_once_with(bdms)
convert_ephemerals.assert_called_once_with(bdms)
bdm_args = tuple(bdms)
convert_volumes.assert_called_once_with(bdm_args)
@@ -2726,7 +2730,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2756,7 +2761,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2808,7 +2814,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
- on_shared_storage=False, request_spec=None, accel_uuids=[])
+ on_shared_storage=False, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2827,7 +2834,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2848,7 +2856,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2881,7 +2889,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=injected_files, new_pass="new_password",
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -3212,6 +3221,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3240,6 +3250,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3273,7 +3284,11 @@ class ComputeTestCase(BaseTestCase,
'delete_on_termination': True,
'guest_format': None,
'volume_size': 2,
- 'boot_index': -1
+ 'boot_index': -1,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
})
swap = fake_block_device.FakeDbBlockDeviceDict({
'id': 3,
@@ -3308,16 +3323,25 @@ class ComputeTestCase(BaseTestCase,
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 1
+ 'size': 1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
},
{
'device_name': '/dev/vdc',
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 2
+ 'size': 2,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
}
],
+ 'image': [],
'block_device_mapping': [],
'root_device_name': None
}
@@ -4593,7 +4617,9 @@ class ComputeTestCase(BaseTestCase,
'limits': {},
'request_spec': None,
'on_shared_storage': False,
- 'accel_uuids': ()}),
+ 'accel_uuids': (),
+ 'reimage_boot_volume': False,
+ 'target_state': None}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5111,7 +5137,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=[], new_pass=password,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
inst_ref.refresh()
@@ -5645,6 +5672,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([1, 2]),
siblings=[set([1]), set([2])],
mempages=[objects.NUMAPagesTopology(
@@ -5660,6 +5688,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
memory_usage=0,
cpu_usage=0,
+ socket=0,
siblings=[set([3]), set([4])],
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=256, used=0)])
@@ -5714,13 +5743,15 @@ class ComputeTestCase(BaseTestCase,
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
- request_id=uuids.req1)])
+ request_id=uuids.req1,
+ compute_node_id=1)])
new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
- request_id=uuids.req2)])
+ request_id=uuids.req2,
+ compute_node_id=2)])
if expected_pci_addr == old_pci_devices[0].address:
expected_pci_device = old_pci_devices[0]
@@ -6066,10 +6097,9 @@ class ComputeTestCase(BaseTestCase,
return fake_network.fake_get_instance_nw_info(self)
self.stub_out('nova.network.neutron.API.get_instance_nw_info', stupid)
- self.useFixture(
- std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
- lambda *args: True))
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutron.API.has_port_binding_extension',
+ lambda *args: True))
# creating instance testdata
instance = self._create_fake_instance_obj({'host': 'dummy'})
c = context.get_admin_context()
@@ -6107,7 +6137,7 @@ class ComputeTestCase(BaseTestCase,
mock_pre.assert_called_once_with(
test.MatchType(nova.context.RequestContext),
test.MatchType(objects.Instance),
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
mock.ANY, mock.ANY, mock.ANY)
@@ -6304,9 +6334,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('completed', migration.status)
mock_pre.assert_called_once_with(c, instance, False, None,
dest, migrate_data)
- mock_migrate.assert_called_once_with(c, instance,
- {'source_compute': instance[
- 'host'], 'dest_compute': dest})
+ mock_migrate.assert_called_once_with(c, instance, mock.ANY)
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
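This and the following hunks swap an exact migration dict for mock.ANY: the call now receives a Migration object whose contents are asserted elsewhere, so these tests only care that the call happened. mock.ANY compares equal to any value, which makes it the idiomatic wildcard for a single argument:

    from unittest import mock

    m = mock.Mock()
    m.migrate_instance_start('ctxt', 'inst', {'source_compute': 'src'})
    # passes no matter what the third argument actually was
    m.migrate_instance_start.assert_called_once_with('ctxt', 'inst', mock.ANY)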
@@ -6389,7 +6417,6 @@ class ComputeTestCase(BaseTestCase,
migration_obj = objects.Migration(uuid=uuids.migration,
source_node=instance.node,
status='completed')
- migration = {'source_compute': srchost, 'dest_compute': dest, }
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False,
is_shared_block_storage=False,
@@ -6412,7 +6439,7 @@ class ComputeTestCase(BaseTestCase,
self.assertIn('cleanup', result)
self.assertTrue(result['cleanup'])
- mock_migrate.assert_called_once_with(c, instance, migration)
+ mock_migrate.assert_called_once_with(c, instance, mock.ANY)
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
@@ -6476,13 +6503,11 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(2, mock_notify.call_count)
post_live_migration.assert_has_calls([
mock.call(c, instance, {'swap': None, 'ephemerals': [],
- 'root_device_name': None,
+ 'image': [], 'root_device_name': None,
'block_device_mapping': []},
migrate_data)])
- migration = {'source_compute': srchost,
- 'dest_compute': dest, }
migrate_instance_start.assert_has_calls([
- mock.call(c, instance, migration)])
+ mock.call(c, instance, mock.ANY)])
post_live_migration_at_destination.assert_has_calls([
mock.call(c, instance, False, dest)])
post_live_migration_at_source.assert_has_calls(
@@ -6709,7 +6734,7 @@ class ComputeTestCase(BaseTestCase,
mock_setup.assert_called_once_with(c, instance, self.compute.host,
teardown=True)
mock_rollback.assert_called_once_with(c, instance, [],
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
destroy_disks=True, migrate_data=None)
@@ -7385,7 +7410,7 @@ class ComputeTestCase(BaseTestCase,
fake_instance.fake_db_instance(uuid=uuids.migration_instance_5,
vm_state=vm_states.ACTIVE,
task_state=None),
- # The expceted migration result will be None instead of error
+ # The expected migration result will be None instead of error
# since _poll_unconfirmed_resizes will not change it
# when the instance vm state is RESIZED and task state
# is deleting, see bug 1301696 for more detail
@@ -7442,12 +7467,11 @@ class ComputeTestCase(BaseTestCase,
# raise exception for uuids.migration_instance_4 to check
# migration status does not get set to 'error' on confirm_resize
# failure.
- if instance['uuid'] == uuids.migration_instance_4:
+ if instance.uuid == uuids.migration_instance_4:
raise test.TestingException('bomb')
self.assertIsNotNone(migration)
for migration2 in migrations:
- if (migration2['instance_uuid'] ==
- migration['instance_uuid']):
+ if migration2['instance_uuid'] == migration.instance_uuid:
migration2['status'] = 'confirmed'
self.stub_out(
@@ -8139,7 +8163,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
self.assertEqual('/dev/vda', instance.root_device_name)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.BlockDeviceMapping, 'save')
@@ -8153,7 +8177,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.Instance, 'save')
@@ -8175,7 +8199,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
mock_default_dev.assert_called_once_with(instance, mock.ANY, bdms[0])
mock_default_name.assert_called_once_with(instance, '/dev/vda', [], [],
- [bdm for bdm in bdms])
+ [], [bdm for bdm in bdms])
def test_default_block_device_names_with_blank_volumes(self):
instance = self._create_fake_instance_obj()
@@ -8235,7 +8259,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
self.assertTrue(object_save.called)
default_device_names.assert_called_once_with(instance,
- '/dev/vda', [bdms[-2]], [bdms[-1]],
+ '/dev/vda', [], [bdms[-2]], [bdms[-1]],
[bdm for bdm in bdms[:-2]])
def test_reserve_block_device_name(self):
@@ -8619,16 +8643,13 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href='f5000000-0000-0000-0000-000000000000')
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href='f5000000-0000-0000-0000-000000000000')
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
image_props = {'image_kernel_id': uuids.kernel_id,
'image_ramdisk_id': uuids.ramdisk_id,
@@ -8638,16 +8659,14 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(value, instance.system_metadata[key])
def test_create_saves_flavor(self):
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href=uuids.image_href_id)
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href=uuids.image_href_id)
+
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
self.assertIn('flavor', instance)
self.assertEqual(self.default_flavor.flavorid,
instance.flavor.flavorid)
@@ -8655,19 +8674,18 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
- with test.nested(
- mock.patch.object(self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch('nova.network.security_group_api.validate_name',
- return_value=uuids.secgroup_id),
- ) as (mock_sbi, mock_secgroups):
+ with mock.patch(
+ "nova.network.security_group_api.validate_name",
+ return_value=uuids.secgroup_id,
+ ) as mock_secgroups:
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
security_groups=['testgroup'])
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.security_groups))
@@ -8692,28 +8710,29 @@ class ComputeAPITestCase(BaseTestCase):
len(db.instance_get_all(self.context)))
mock_secgroups.assert_called_once_with(mock.ANY, 'invalid_sec_group')
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=False),
+ )
def test_create_instance_associates_requested_networks(self):
# Make sure create adds the requested networks to the RequestSpec
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.port_instance)])
- with test.nested(
- mock.patch.object(
- self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch.object(
- self.compute_api.network_api,
- 'create_resource_requests',
- return_value=(None, [], objects.RequestLevelParams())),
- ) as (mock_sbi, _mock_create_resreqs):
+ with mock.patch.object(
+ self.compute_api.network_api,
+ "create_resource_requests",
+ return_value=(None, [], objects.RequestLevelParams()),
+ ):
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
requested_networks=requested_networks)
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.requested_networks))
@@ -8856,7 +8875,7 @@ class ComputeAPITestCase(BaseTestCase):
group.create()
get_group_mock.return_value = group
- self.assertRaises(exception.QuotaError, self.compute_api.create,
+ self.assertRaises(exception.OverQuota, self.compute_api.create,
self.context, self.default_flavor, self.fake_image['id'],
scheduler_hints={'group': group.uuid},
check_server_group_quota=True)
@@ -9827,6 +9846,10 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(refs[i]['display_name'], name)
self.assertEqual(refs[i]['hostname'], name)
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch("nova.objects.service.get_minimum_version_all_cells")
@mock.patch(
"nova.network.neutron.API.has_extended_resource_request_extension")
@@ -10209,8 +10232,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_console_output,
self.context, instance)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_attach_interface(self, mock_notify):
+ def test_attach_interface(self):
instance = self._create_fake_instance_obj()
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
@@ -10230,8 +10252,12 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch.object(
self.compute,
"_claim_pci_device_for_interface_attach",
- return_value=None)
- ) as (cap, mock_lock, mock_create_resource_req, mock_claim_pci):
+ return_value=None),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
+ ) as (
+ cap, mock_lock, mock_create_resource_req, mock_claim_pci,
+ mock_notify
+ ):
mock_create_resource_req.return_value = (
None, [], mock.sentinel.req_lvl_params)
vif = self.compute.attach_interface(self.context,
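Folding notify_about_instance_action into the test.nested() block works because the `as (...)` tuple unpacks in the same order the patchers are listed, so the new mock_notify has to come last. The same rule with plain context managers:

    from unittest import mock

    with mock.patch('os.getcwd') as m_cwd, mock.patch('os.listdir') as m_ls:
        # names bind to targets left to right, in listing order
        assert m_cwd is not m_ls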
@@ -10492,7 +10518,7 @@ class ComputeAPITestCase(BaseTestCase):
pci_reqs = mock_claim_pci.mock_calls[0][1][1]
self.assertEqual([pci_req], pci_reqs.requests)
- # after the pci claim we also need to allocate that pci to the instace
+ # after the pci claim we also need to allocate that pci to the instance
mock_allocate_pci.assert_called_once_with(self.context, instance)
# and as this changes the instance we have to save it.
mock_save.assert_called_once_with()
@@ -10739,8 +10765,13 @@ class ComputeAPITestCase(BaseTestCase):
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
- mock.patch.object(self.compute.rt, 'claim_pci_devices',
- return_value=[]),
+ mock.patch.object(
+ self.compute.rt,
+ 'claim_pci_devices',
+ side_effect=exception.PciDeviceRequestFailed(
+ requests=instance.pci_requests
+ )
+ ),
mock.patch.object(
self.compute, '_allocate_port_resource_for_instance'),
mock.patch(
@@ -10816,7 +10847,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
mock_update_pci
@@ -10886,7 +10917,7 @@ class ComputeAPITestCase(BaseTestCase):
new=mock.NonCallableMock()),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10931,7 +10962,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10998,7 +11029,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
'remove_resources_from_instance_allocation'),
@@ -11049,8 +11080,7 @@ class ComputeAPITestCase(BaseTestCase):
mock_remove_res.assert_called_once_with(
self.context, instance.uuid, mock.sentinel.resources)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_detach_interface(self, mock_notify):
+ def test_detach_interface(self):
nwinfo, port_id = self.test_attach_interface()
instance = self._create_fake_instance_obj()
instance.info_cache = objects.InstanceInfoCache.new(
@@ -11083,10 +11113,13 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch('nova.pci.request.get_instance_pci_request_from_vif',
return_value=pci_req),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices'),
- mock.patch.object(instance, 'save')
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
) as (
- mock_remove_alloc, mock_deallocate, mock_lock,
- mock_get_pci_req, mock_unclaim_pci, mock_instance_save):
+ mock_remove_alloc, mock_deallocate, mock_lock,
+ mock_get_pci_req, mock_unclaim_pci, mock_instance_save,
+ mock_notify
+ ):
self.compute.detach_interface(self.context, instance, port_id)
mock_deallocate.assert_called_once_with(
@@ -11561,12 +11594,60 @@ class ComputeAPITestCase(BaseTestCase):
instance.uuid, None)
@mock.patch.object(context.RequestContext, 'elevated')
+ @mock.patch.object(cinder.API, 'detach')
+ @mock.patch.object(cinder.API, 'terminate_connection')
+ @mock.patch.object(compute_manager.ComputeManager,
+ '_get_instance_block_device_info')
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_legacy_volume_detach(
+ self, mock_get_connector, mock_info, mock_terminate, mock_detach,
+ mock_elevated,
+ ):
+ # test _shutdown_instance with legacy BDMs without a volume
+ # attachment ID
+ admin = context.get_admin_context()
+ mock_elevated.return_value = admin
+ instance = self._create_fake_instance_obj()
+ connector = 'fake-connector'
+ mock_get_connector.return_value = connector
+
+ vol_a_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_a_id,
+ attachment_id=None)
+ vol_b_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_b_id,
+ attachment_id=None)
+ bdms = [vol_a_bdm, vol_b_bdm]
+
+ self.compute._shutdown_instance(admin, instance, bdms)
+
+        # we should only get the connector once, regardless of the number of
+ # volumes
+ mock_get_connector.assert_called_once_with(instance)
+ # but we should have separate terminate and detach calls
+ mock_terminate.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, connector),
+ mock.call(admin, uuids.volume_b_id, connector),
+ ])
+ mock_detach.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, instance.uuid),
+ mock.call(admin, uuids.volume_b_id, instance.uuid),
+ ])
+
+ @mock.patch.object(context.RequestContext, 'elevated')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_delete(self, mock_info,
- mock_attach_delete,
- mock_elevated):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_delete(
+ self, mock_get_connector, mock_info, mock_attach_delete, mock_elevated,
+ ):
# test _shutdown_instance with volume bdm containing an
# attachment id. This should use the v3 cinder api.
admin = context.get_admin_context()
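The new test above, together with the reworked attachment tests below, encodes the split in _shutdown_instance: BDMs without an attachment_id follow the legacy path (fetch one host connector, then terminate_connection plus detach per volume), while BDMs with an attachment_id use a single cinder v3 attachment_delete and never need a connector. A hedged sketch of that branching, with names taken from the mocks (the real method also handles networking, notifications and error paths):

    def _detach_volumes(self, context, instance, bdms):
        connector = None
        for bdm in bdms:
            if bdm.attachment_id:
                # cinder v3 style: one call, no host connector involved
                self.volume_api.attachment_delete(context, bdm.attachment_id)
            else:
                # legacy style: get the connector once, reuse it per volume
                if connector is None:
                    connector = self.driver.get_volume_connector(instance)
                self.volume_api.terminate_connection(
                    context, bdm.volume_id, connector)
                self.volume_api.detach(context, bdm.volume_id, instance.uuid)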
@@ -11586,14 +11667,18 @@ class ComputeAPITestCase(BaseTestCase):
self.compute._shutdown_instance(admin, instance, bdms)
mock_attach_delete.assert_called_once_with(admin, attachment_id)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
@mock.patch.object(compute_manager.LOG, 'debug')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_not_found(self, mock_info,
- mock_attach_delete,
- mock_debug_log):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_not_found(
+ self, mock_get_connector, mock_info, mock_attach_delete,
+ mock_debug_log,
+ ):
# test _shutdown_instance with attachment_delete throwing
# a VolumeAttachmentNotFound exception. This should not
# cause _shutdown_instance to fail. Only a debug log
@@ -11619,6 +11704,8 @@ class ComputeAPITestCase(BaseTestCase):
# get last call to LOG.debug and verify correct exception is in there
self.assertIsInstance(mock_debug_log.call_args[0][1],
exception.VolumeAttachmentNotFound)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
@@ -11878,7 +11965,7 @@ class ComputeAPITestCase(BaseTestCase):
force=False)
@mock.patch('nova.compute.utils.notify_about_instance_action')
- def _test_evacuate(self, mock_notify, force=None):
+ def _test_evacuate(self, mock_notify, force=None, target_state=None):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
@@ -11893,17 +11980,16 @@ class ComputeAPITestCase(BaseTestCase):
instance.save()
@mock.patch.object(objects.Service, 'get_by_compute_host')
- @mock.patch.object(self.compute_api.compute_task_api,
- 'rebuild_instance')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec,
'get_by_instance_uuid')
@mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up')
- def do_test(service_is_up, get_by_instance_uuid, get_all_by_host,
- rebuild_instance, get_service):
+ def do_test(
+ service_is_up, get_by_instance_uuid, get_all_by_host, get_service
+ ):
service_is_up.return_value = False
get_by_instance_uuid.return_value = fake_spec
- rebuild_instance.side_effect = fake_rebuild_instance
+ self.rebuild_instance_mock.side_effect = fake_rebuild_instance
get_all_by_host.return_value = objects.ComputeNodeList(
objects=[objects.ComputeNode(
host='fake_dest_host',
@@ -11916,12 +12002,13 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host',
on_shared_storage=True,
admin_password=None,
- force=force)
+ force=force,
+ target_state=target_state)
if force is False:
host = None
else:
host = 'fake_dest_host'
- rebuild_instance.assert_called_once_with(
+ self.rebuild_instance_mock.assert_called_once_with(
ctxt,
instance=instance,
new_pass=None,
@@ -11933,7 +12020,8 @@ class ComputeAPITestCase(BaseTestCase):
recreate=True,
on_shared_storage=True,
request_spec=fake_spec,
- host=host)
+ host=host,
+ target_state=target_state)
do_test()
instance.refresh()
@@ -11965,6 +12053,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_evacuate_with_forced_host(self):
self._test_evacuate(force=True)
+ def test_evacuate_with_target_state(self):
+ self._test_evacuate(target_state="stopped")
+
@mock.patch('nova.servicegroup.api.API.service_is_up',
return_value=False)
def test_fail_evacuate_with_non_existing_destination(self, _service_is_up):
@@ -13039,16 +13130,13 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = aggregate.hosts if 'hosts' in aggregate else None
self.assertIn(values[0][1][0], hosts)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per AggregateAPI class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.api._placement_client)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.api.placement_client
+ self.assertFalse(mock_report_client.called)
+ self.api.placement_client
mock_report_client.assert_called_once_with()
@@ -13427,7 +13515,8 @@ class EvacuateHostTestCase(BaseTestCase):
super(EvacuateHostTestCase, self).tearDown()
def _rebuild(self, on_shared_storage=True, migration=None,
- send_node=False, vm_states_is_stopped=False):
+ send_node=False, vm_states_is_stopped=False,
+ expect_error=False):
network_api = self.compute.network_api
ctxt = context.get_admin_context()
@@ -13441,7 +13530,7 @@ class EvacuateHostTestCase(BaseTestCase):
return_value=mock.sentinel.mapping)
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
@mock.patch.object(network_api, 'setup_networks_on_host')
@@ -13461,7 +13550,8 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13473,6 +13563,11 @@ class EvacuateHostTestCase(BaseTestCase):
action='power_off', phase='start'),
mock.call(ctxt, self.inst, self.inst.host,
action='power_off', phase='end')])
+ elif expect_error:
+ mock_notify_rebuild.assert_has_calls([
+ mock.call(ctxt, self.inst, self.compute.host,
+ phase='error', exception=mock.ANY, bdms=bdms)])
+ return
else:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13527,14 +13622,15 @@ class EvacuateHostTestCase(BaseTestCase):
mock.patch.object(self.compute, '_get_compute_info',
side_effect=fake_get_compute_info)
) as (mock_inst, mock_get):
- self._rebuild()
+ self.assertRaises(exception.InstanceFaultRollback,
+ self._rebuild, expect_error=True)
# Should be on destination host
instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertIsNone(instance['node'])
- self.assertTrue(mock_inst.called)
- self.assertTrue(mock_get.called)
+ self.assertEqual('fake_host_2', instance['host'])
+ self.assertEqual('fakenode2', instance['node'])
+ mock_inst.assert_not_called()
+ mock_get.assert_called_once_with(mock.ANY, self.compute.host)
def test_rebuild_on_host_node_passed(self):
patch_get_info = mock.patch.object(self.compute, '_get_compute_info')
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 4d7967b37e..73c9d32197 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -17,6 +17,7 @@ import copy
import datetime
import fixtures as std_fixtures
import time
+from unittest import mock
from cinderclient import exceptions as cinder_exception
from cursive import exception as cursive_exception
@@ -24,7 +25,6 @@ import ddt
from eventlet import event as eventlet_event
from eventlet import timeout as eventlet_timeout
from keystoneauth1 import exceptions as keystone_exception
-import mock
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -57,6 +57,7 @@ from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
@@ -76,6 +77,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.virt import node as virt_node
from nova.volume import cinder
@@ -86,6 +88,11 @@ fake_host_list = [mock.sentinel.host1]
@ddt.ddt
class ComputeManagerUnitTestCase(test.NoDBTestCase,
fake_resource_tracker.RTMockMixin):
+ # os-brick>=5.1 now uses external file system locks instead of internal
+ # locks so we need to set up locking
+ REQUIRES_LOCKING = True
+ STUB_COMPUTE_ID = False
+
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
self.compute = manager.ComputeManager()
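REQUIRES_LOCKING is needed because oslo.concurrency external locks are file-based and serialize across processes, unlike the in-process semaphores os-brick used before, so the test base has to provide a usable lock directory. Roughly what such a lock looks like in os-brick-style code (the lock name and path here are illustrative):

    from oslo_concurrency import lockutils

    # external=True takes a file lock under lock_path instead of an
    # in-process semaphore, so concurrent processes are serialized too
    with lockutils.lock('connect_volume', external=True,
                        lock_path='/tmp/nova-test-locks'):
        pass  # critical section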
@@ -344,6 +351,46 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, mock.sentinel.node, startup=True)
log_mock.exception.assert_called_once()
+ def test_update_available_resource_for_node_pci_placement_failed_startup(
+ self
+ ):
+ """If the PCI placement translation failed during startup then the
+ exception is raised up to kill the service
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.assertRaises(
+ exception.PlacementPciException,
+ self.compute._update_available_resource_for_node,
+ self.context,
+ mock.sentinel.node,
+ startup=True,
+ )
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=True)
+
+ @mock.patch('nova.compute.manager.LOG')
+ def test_update_available_resource_for_node_pci_placement_failed_later(
+ self, mock_log
+ ):
+ """If the PCI placement translation failed later (not at startup)
+ during a periodic then the exception is just logged
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.compute._update_available_resource_for_node(
+ self.context, mock.sentinel.node, startup=False)
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=False)
+ mock_log.exception.assert_called_once_with(
+ 'Error updating PCI resources for node %(node)s.',
+ {'node': mock.sentinel.node}
+ )
+
@mock.patch.object(manager, 'LOG')
@mock.patch.object(manager.ComputeManager,
'_update_available_resource_for_node')
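Taken together, the two new tests fix the error-handling contract for PCI-in-placement translation: fatal at startup (bad configuration should kill the service), logged-and-continue from the periodic task. A minimal sketch of the pattern they assert, reusing the log message checked above (the surrounding method does more than this):

    def _update_available_resource_for_node(self, context, nodename,
                                            startup=False):
        try:
            rt = self.rt
            rt.update_available_resource(context, nodename, startup=startup)
        except exception.PlacementPciException:
            if startup:
                # misconfiguration: re-raise so the service fails to start
                raise
            # periodic case: log and let the next run retry
            LOG.exception('Error updating PCI resources for node %(node)s.',
                          {'node': nodename})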
@@ -862,6 +909,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
+ @mock.patch.object(manager.ComputeManager,
+ '_ensure_existing_node_identity')
@mock.patch.object(manager.ComputeManager, '_get_nodes')
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@@ -880,17 +929,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm, mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_init_host,
- mock_error_interrupted, mock_get_nodes):
+ mock_error_interrupted, mock_get_nodes,
+ mock_existing_node):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
+ mock_existing_node.assert_not_called()
mock_validate_pinning.assert_called_once_with(inst_list)
mock_validate_vtpm.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
@@ -933,8 +984,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"""
mock_get_nodes.return_value = {
uuids.cn_uuid1: objects.ComputeNode(
- uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
- self.compute.init_host()
+ uuid=uuids.cn_uuid1, hypervisor_hostname='node1',
+ host=self.compute.host)}
+ self.compute.init_host(None)
mock_error_interrupted.assert_called_once_with(
test.MatchType(nova.context.RequestContext), set(),
@@ -944,16 +996,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
- def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
+ def test_cleanup_host(self, mock_cnlist_get, mock_miglist_get,
+ mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
+ mock_cnlist_get.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
+ self.compute.init_host(None)
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
@@ -1042,7 +1097,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'remove_provider_tree_from_instance_allocation')
) as (mock_get_net, mock_remove_allocation):
- self.compute.init_host()
+ self.compute.init_host(None)
mock_remove_allocation.assert_called_once_with(
self.context, deleted_instance.uuid, uuids.our_node_uuid)
@@ -1095,11 +1150,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuids.evac_instance: evacuating_instance
}
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
mock_init_instance.assert_called_once_with(
self.context, active_instance)
@@ -1107,23 +1162,49 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {active_instance.uuid, evacuating_instance.uuid},
mock_get_nodes.return_value.keys())
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
- def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn1 = objects.ComputeNode(uuid=uuids.cn1)
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [cn1, cn2]
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
+ # NOTE(danms): The fake driver, by default, uses
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
+ # return here.
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
+ mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn1: cn1, uuids.cn2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_by_uuid.assert_called_once_with(self.context,
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # Possible hostname (as reported by the virt driver) rename,
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
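The reworked _get_nodes tests describe the new node-identity flow: the driver reports a uuid -> hypervisor_hostname mapping, the matching ComputeNode records are fetched by uuid in a single query, missing records only produce the warning checked below, and a hostname disagreement between driver and database is treated as a likely rename that aborts startup. A sketch of that flow under the same assumptions:

    def _get_nodes(self, context):
        # the driver now reports {node_uuid: hypervisor_hostname}
        driver_nodes = self.driver.get_nodenames_by_uuid()
        db_nodes = objects.ComputeNodeList.get_all_by_uuids(
            context, list(driver_nodes))
        if not db_nodes:
            LOG.warning('Compute nodes %s for host %s were not found in '
                        'the database. If this is the first time this '
                        'service is starting on this host, then you can '
                        'ignore this warning.', list(driver_nodes), self.host)
            return {}
        for cn in db_nodes:
            if cn.hypervisor_hostname != driver_nodes[cn.uuid]:
                # likely a hostname rename; refuse to start
                raise exception.InvalidConfiguration()
        return {cn.uuid: cn for cn in db_nodes}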
@@ -1145,37 +1226,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
- self, mock_driver_get_nodes, mock_get_by_host_and_node,
+ self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [
- exception.ComputeHostNotFound(host='fake-node1'), cn2]
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
+ mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn2: cn2}, nodes)
+ self.assertEqual({}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_all_by_uuids.assert_called_once_with(self.context,
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
- "Compute node %s not found in the database. If this is the first "
- "time this service is starting on this host, then you can ignore "
- "this warning.", 'fake-node1')
+ "Compute nodes %s for host %s were not found in the database. "
+ "If this is the first time this service is starting on this host, "
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(self,
@@ -1186,13 +1265,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_pinning.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(self,
@@ -1203,7 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -1306,6 +1387,36 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(instance)
+ def test_init_instance_vif_plug_fails_missing_pci(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid=uuids.instance,
+ info_cache=None,
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ host=self.compute.host,
+ expected_attrs=['info_cache'])
+
+ with test.nested(
+ mock.patch.object(context, 'get_admin_context',
+ return_value=self.context),
+ mock.patch.object(objects.Instance, 'get_network_info',
+ return_value=network_model.NetworkInfo()),
+ mock.patch.object(self.compute.driver, 'plug_vifs',
+ side_effect=exception.PciDeviceNotFoundById("pci-addr")),
+ mock.patch("nova.compute.manager.LOG.exception"),
+ ) as (get_admin_context, get_nw_info, plug_vifs, log_exception):
+ # as this does not raise, we are sure that the compute service
+ # continues initializing the rest of the instances
+ self.compute._init_instance(self.context, instance)
+ log_exception.assert_called_once_with(
+ "Virtual interface plugging failed for instance. Probably the "
+ "vnic_type of the bound port has been changed. Nova does not "
+ "support such change.",
+ instance=instance
+ )
+
def _test__validate_pinning_configuration(self, supports_pcpus=True):
instance_1 = fake_instance.fake_instance_obj(
self.context, uuid=uuids.instance_1)
@@ -2449,10 +2560,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertFalse(mock_get_info.called)
self.assertFalse(mock_sync_power_state.called)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_not_found_driver(
- self, mock_sync_power_state):
+ self, mock_sync_power_state, mock_claim):
error = exception.InstanceNotFound(instance_id=1)
with mock.patch.object(self.compute.driver,
'get_info', side_effect=error) as mock_get_info:
@@ -3460,7 +3572,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_success(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination()
@@ -3468,7 +3580,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_fail(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self.assertRaises(
test.TestingException,
@@ -3479,7 +3591,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_contains_vifs(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
migrate_data = self._test_check_can_live_migrate_destination()
self.assertIn('vifs', migrate_data)
@@ -3489,7 +3601,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_no_binding_extended(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: False))
migrate_data = self._test_check_can_live_migrate_destination()
self.assertNotIn('vifs', migrate_data)
@@ -3498,7 +3610,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_false(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=False)
@@ -3506,7 +3618,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_true(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=True)
@@ -4996,8 +5108,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute.reportclient,
'remove_provider_tree_from_instance_allocation'),
mock.patch('nova.objects.Instance.get_by_uuid')
- ) as (_get_intances_on_driver, destroy, migration_list, migration_save,
- get_resources, remove_allocation, instance_get_by_uuid):
+ ) as (_get_instances_on_driver, destroy, migration_list,
+ migration_save, get_resources, remove_allocation,
+ instance_get_by_uuid):
migration_list.return_value = [migration_1]
instance_get_by_uuid.return_value = instance_1
get_resources.return_value = mock.sentinel.resources
@@ -5059,15 +5172,18 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
msg = mock_log.warning.call_args_list[0]
self.assertIn('appears to not be owned by this host', msg[0][0])
- def test_init_host_pci_passthrough_whitelist_validation_failure(self):
- # Tests that we fail init_host if there is a pci.passthrough_whitelist
+ def test_init_host_pci_device_spec_validation_failure(self):
+ # Tests that we fail init_host if there is a pci.device_spec
# configured incorrectly.
- self.flags(passthrough_whitelist=[
- # it's invalid to specify both in the same devspec
- jsonutils.dumps({'address': 'foo', 'devname': 'bar'})],
- group='pci')
+ self.flags(
+ device_spec=[
+ # it's invalid to specify both in the same devspec
+ jsonutils.dumps({'address': 'foo', 'devname': 'bar'})
+ ],
+ group='pci'
+ )
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
@@ -5257,7 +5373,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [])
+ recreate, False, False, None, scheduled_node, {}, None, [], False,
+ None)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5350,7 +5467,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self, mock_rebuild_claim, mock_set_migration_status,
mock_validate_policy, mock_image_meta, mock_notify_exists,
mock_notify_legacy, mock_notify, mock_instance_save,
- mock_setup_networks, mock_setup_intance_network, mock_get_bdms,
+ mock_setup_networks, mock_setup_instance_network, mock_get_bdms,
mock_mutate_migration, mock_appy_migration, mock_drop_migration,
mock_context_elevated):
self.flags(api_servers=['http://localhost/image/v2'], group='glance')
@@ -5368,7 +5485,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
None, recreate=True, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
- limits={}, request_spec=request_spec, accel_uuids=[])
+ limits={}, request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False,
+ target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5407,7 +5526,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance, None, None, None, None, None, None,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
- request_spec=request_spec, accel_uuids=[])
+ request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5433,7 +5553,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [])
+ False, False, migration, None, {}, None, [], False,
+ None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5455,7 +5576,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [])
+ None, [], False, None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5537,7 +5658,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate, on_shared_storage,
preserve_ephemeral, {}, {},
self.allocations,
- mock.sentinel.mapping, [])
+ mock.sentinel.mapping, [],
+ False, None)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5555,8 +5677,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
provider_mappings=mock.sentinel.mapping)
mock_get_nw_info.assert_called_once_with(self.context, instance)
- def test_rebuild_default_impl(self):
- def _detach(context, bdms):
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test_rebuild_default_impl(self, is_vol_backed, reimage_boot_vol):
+ fake_image_meta = mock.MagicMock(id='fake_id')
+
+ def _detach(context, bdms, detach_root_bdm=True):
        # NOTE(rpodolyaka): check that the instance has been powered off by
        # the time we detach block devices; exact call arguments will be
        # checked below
@@ -5582,13 +5708,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute, '_power_off_instance',
return_value=None),
mock.patch.object(self.compute, '_get_accel_info',
- return_value=[])
+ return_value=[]),
+ mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ return_value=is_vol_backed),
+ mock.patch.object(self.compute, '_rebuild_volume_backed_instance'),
+ mock.patch.object(compute_utils, 'get_root_bdm')
) as(
mock_destroy,
mock_spawn,
mock_save,
mock_power_off,
- mock_accel_info
+ mock_accel_info,
+ mock_is_volume_backed,
+ mock_rebuild_vol_backed_inst,
+ mock_get_root,
):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = None
@@ -5598,9 +5731,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.device_metadata = None
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
+ fake_block_device_info = {
+ 'block_device_mapping': [
+ {'attachment_id': '341a8917-f74d-4473-8ee7-4ca05e5e0ab3',
+ 'volume_id': 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'target_discovered': False,
+ 'target_portal': '127.0.0.1:3260',
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-'
+ 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'target_lun': 0}}}]}
self.compute._rebuild_default_impl(self.context,
instance,
- None,
+ fake_image_meta,
[],
admin_password='new_pass',
bdms=[],
@@ -5609,16 +5752,151 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
attach_block_devices=_attach,
network_info=None,
evacuate=False,
- block_device_info=None,
- preserve_ephemeral=False)
+ block_device_info=
+ fake_block_device_info,
+ preserve_ephemeral=False,
+ reimage_boot_volume=
+ reimage_boot_vol)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
- network_info=None, block_device_info=None)
+ network_info=None, block_device_info=fake_block_device_info)
mock_power_off.assert_called_once_with(
instance, clean_shutdown=True)
+ if is_vol_backed and reimage_boot_vol:
+ mock_rebuild_vol_backed_inst.assert_called_once_with(
+ self.context, instance, [], fake_image_meta.id)
+ else:
+ mock_rebuild_vol_backed_inst.assert_not_called()
+
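The rebuild test is now parametrized over all four (is_vol_backed, reimage_boot_vol) combinations via ddt, and only the (True, True) pair should reach _rebuild_volume_backed_instance. A minimal standalone sketch of the same ddt pattern:

    import unittest

    import ddt


    def _should_reimage(is_vol_backed, reimage_boot_vol):
        # Mirrors the branch asserted by the Nova test above.
        return is_vol_backed and reimage_boot_vol


    @ddt.ddt
    class RebuildMatrix(unittest.TestCase):
        @ddt.data((False, False), (False, True), (True, False), (True, True))
        @ddt.unpack
        def test_combinations(self, is_vol_backed, reimage_boot_vol):
            expected = (is_vol_backed, reimage_boot_vol) == (True, True)
            self.assertEqual(
                expected, _should_reimage(is_vol_backed, reimage_boot_vol))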
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+ events = [('volume-reimaged', root_bdm.volume_id)]
+ image_size_gb = 1
+ deadline = CONF.reimage_timeout_per_gb * image_size_gb
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as (
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ # 1024 ** 3 = 1073741824
+ mock_get_img.return_value = {'size': 1073741824}
+ self.compute._rebuild_volume_backed_instance(
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_vol_api.reimage_volume.assert_called_once_with(
+ self.context, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+ mock_get_root_bdm.assert_called_once_with(
+ self.context, instance, bdms)
+ wait_inst_event.assert_called_once_with(
+ instance, events, deadline=deadline,
+ error_callback=self.compute._reimage_failed_callback)
+
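The deadline arithmetic here is worth spelling out: the image API reports the size in bytes (1073741824 = 1 GiB) and the wait deadline scales linearly with [DEFAULT]reimage_timeout_per_gb. A sketch of the computation, assuming the option's default of 20 seconds per GB; treat that value, and the rounding, as assumptions rather than Nova's exact code:

    import math

    REIMAGE_TIMEOUT_PER_GB = 20  # assumed default of reimage_timeout_per_gb


    def reimage_deadline(image_size_bytes):
        # Round up to whole GiB so small images still get a usable window.
        size_gb = max(1, math.ceil(image_size_bytes / 1024 ** 3))
        return REIMAGE_TIMEOUT_PER_GB * size_gb


    # The test's 1073741824-byte image yields a 1 GiB size and, under the
    # assumed default, a 20 second deadline for the volume-reimaged event.
    assert reimage_deadline(1073741824) == 20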
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance_image_not_found(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+        ) as (
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ mock_get_img.side_effect = exception.ImageNotFound(
+ image_id=uuids.image_id)
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ mock_get_img.return_value = {'size': 1}
+ self.assertRaises(
+ exception.BuildAbortException,
+ self.compute._rebuild_volume_backed_instance,
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+
+ @mock.patch.object(objects.Instance, 'save', return_value=None)
+ @mock.patch.object(fake_driver.SmallFakeDriver, 'detach_volume')
+ @mock.patch.object(cinder.API, 'roll_detaching')
+ def test__detach_root_volume(self, mock_roll_detach, mock_detach,
+ mock_save):
+ exception_list = [
+ '',
+ exception.DiskNotFound(location="not\\here"),
+ exception.DeviceDetachFailed(device="fake_dev", reason="unknown"),
+ ]
+ mock_detach.side_effect = exception_list
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.assertRaises(exception.DeviceDetachFailed,
+ self.compute._detach_root_volume,
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
+ self.assertRaises(Exception, self.compute._detach_root_volume, # noqa
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
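The detach test leans on a mock feature that is easy to miss: when side_effect is a list, each call to the mock consumes the next item, and items that are exceptions are raised instead of returned. A small illustration:

    from unittest import mock

    detach = mock.Mock(side_effect=[
        None,                           # first call: plain return value
        IOError('disk not found'),      # second call: raised
        RuntimeError('detach failed'),  # third call: raised
    ])

    assert detach() is None
    try:
        detach()
    except IOError:
        pass
    try:
        detach()
    except RuntimeError:
        pass  # the list is now exhausted; a fourth call raises StopIteration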
def test_do_rebuild_instance_check_trusted_certs(self):
"""Tests the scenario that we're rebuilding an instance with
@@ -5640,7 +5918,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -6084,6 +6362,171 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual({'one-image': 'cached',
'two-image': 'existing'}, r)
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
+ # Make sure an up-to-date service bypasses the persistence
+ service_ref = service_obj.Service()
+ self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
+ # Make sure an old service for ironic does not write a local node uuid
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_preprovisioned(self,
+ mock_read_node,
+ mock_write_node):
+ # Make sure an old service does not write a uuid if one is present
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = str(uuids.SOME_UUID)
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_no_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find no nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = []
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_multi_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find multiple nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [1, 2]
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_writes_node_uuid(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, there is no pre-provisioned local
+ # compute node uuid, and we find exactly one compute node in the
+ # database for our host, we persist that.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [
+ objects.ComputeNode(uuid=str(uuids.compute)),
+ ]
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_called_once_with(str(uuids.compute))
+
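Taken together, the upgrade tests above pin down a small decision table for an old (pre-node-identity) service: if a local node uuid file already exists, do nothing; otherwise consult the database, persist the uuid when exactly one compute node matches this host, and refuse to start on zero or multiple matches. A hedged sketch of that logic (helper names are illustrative, not Nova's exact code):

    def ensure_node_uuid(read_local, get_nodes_for_host, write_local, host):
        """Sketch of the upgrade-time node identity check."""
        if read_local() is not None:
            return  # a uuid was already provisioned; nothing to do
        nodes = get_nodes_for_host(host)
        if len(nodes) != 1:
            # Zero nodes suggests a misconfigured host; multiple nodes
            # make the identity ambiguous. Abort startup either way.
            raise RuntimeError('cannot determine node identity for %s' % host)
        write_local(nodes[0].uuid)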
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
+ def test_ensure_node_uuid_called_by_init_host(self):
+ # test_init_host() above ensures that we do not call
+ # _ensure_existing_node_identity() in the service_ref=None case.
+ # Since testing init_host() requires a billion mocks, this
+        # tests that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_ensure_existing_node_identity') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host,
+ mock.sentinel.service_ref)
+ m.assert_called_once_with(mock.sentinel.service_ref)
+
+ def test_check_for_host_rename_ironic(self):
+ self.flags(compute_driver='ironic')
+        # With the ironic virt driver configured we take the early exit,
+        # regardless of the nodes passed in
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.compute._check_for_host_rename(nodes)
+
+ def test_check_for_host_rename_renamed_only(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_renamed_one(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host),
+ uuids.node2: mock.MagicMock(uuid=uuids.node2,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_not_renamed(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host)}
+ with mock.patch.object(manager.LOG, 'debug') as mock_debug:
+ self.compute._check_for_host_rename(nodes)
+ mock_debug.assert_called_once_with(
+ 'Verified node %s matches my host %s',
+ uuids.node1, self.compute.host)
+
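The rename tests above encode an all-or-nothing rule: every compute node recorded for this service must claim the current host, or startup aborts (the ironic driver skips the check entirely). A minimal sketch, with Nova's InvalidConfiguration stood in by a plain exception:

    def check_for_host_rename(nodes, my_host):
        # 'nodes' maps node uuid -> object with 'uuid' and 'host'
        # attributes, mirroring the MagicMock dicts built above.
        for node in nodes.values():
            if node.host != my_host:
                raise RuntimeError(
                    'node %s belongs to %s, not %s; was this host renamed?'
                    % (node.uuid, node.host, my_host))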
+ @mock.patch('nova.compute.manager.ComputeManager._get_nodes')
+ def test_check_for_host_rename_called_by_init_host(self, mock_nodes):
+ # Since testing init_host() requires a billion mocks, this
+        # tests that we do call it when expected, but makes it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_check_for_host_rename') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host, None)
+ m.assert_called_once_with(mock_nodes.return_value)
+
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -6126,6 +6569,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute.rt = fake_rt
+ self.compute.driver._set_nodes([self.node])
+ self.compute.rt.compute_nodes = {self.node: objects.ComputeNode()}
self.allocations = {
uuids.provider1: {
@@ -6415,6 +6860,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.assert_called_once_with(
self.instance.uuid, only_resolved=True)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -6426,7 +6872,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_spawn_called_with_accel_info(self, mock_ins_usage,
mock_ins_create, mock_dev_tag, mock_certs, mock_req_group_map,
- mock_get_allocations, mock_ins_save, mock_spawn):
+ mock_get_allocations, mock_ins_save, mock_spawn, mock_claim):
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
@@ -6700,13 +7146,15 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.security_groups, self.block_device_mapping,
request_spec={}, host_lists=[fake_host_list])
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_rescheduled_exception_with_non_ascii_exception(self,
- mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown):
+ mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown,
+ mock_claim):
exc = exception.NovaException(u's\xe9quence')
mock_build.return_value = self.network_info
@@ -6722,7 +7170,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.accel_uuids)
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
mock_notify.assert_has_calls([
@@ -7228,6 +7675,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertEqual(10, mock_failed.call_count)
mock_succeeded.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@@ -7235,7 +7683,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def _test_instance_exception(self, exc, raised_exc,
mock_notify, mock_save, mock_spawn,
- mock_build, mock_shutdown):
+ mock_build, mock_shutdown, mock_claim):
"""This method test the instance related InstanceNotFound
and reschedule on exception errors. The test cases get from
arguments.
@@ -7258,7 +7706,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping')])
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
@@ -7369,11 +7816,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
'_shutdown_instance'),
mock.patch.object(self.compute,
'_validate_instance_group_policy'),
+ mock.patch.object(self.compute.rt, 'instance_claim'),
mock.patch('nova.compute.utils.notify_about_instance_create')
) as (spawn, save,
_build_networks_for_instance, _notify_about_instance_usage,
_shutdown_instance, _validate_instance_group_policy,
- mock_notify):
+ mock_claim, mock_notify):
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance, self.context,
@@ -7404,7 +7852,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
@@ -7466,11 +7913,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
request_spec={}, host_lists=[fake_host_list])
mock_nil.assert_called_once_with(self.instance)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_build_resources')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_build_resources_buildabort_reraise(self, mock_notify, mock_save,
- mock_build):
+ mock_build, mock_claim):
exc = exception.BuildAbortException(
instance_uuid=self.instance.uuid, reason='')
mock_build.side_effect = exc
@@ -7484,7 +7932,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.node, self.limits, self.filter_properties,
request_spec=[], accel_uuids=self.accel_uuids)
- mock_save.assert_called_once_with()
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
extra_usage_info={'image_name': self.image.get('name')}),
@@ -7585,6 +8032,27 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance, hints)
mock_get.assert_called_once_with(self.context, uuids.group_hint)
+ @mock.patch('nova.objects.InstanceGroup.get_by_hint')
+ def test_validate_instance_group_policy_deleted_group(self, mock_get):
+ """Tests that _validate_instance_group_policy handles the case
+ where the scheduler hint has a group but that group has been deleted.
+        This test is a reproducer for bug: #1890244
+ """
+ instance = objects.Instance(uuid=uuids.instance)
+ hints = {'group': [uuids.group_hint]}
+ mock_get.side_effect = exception.InstanceGroupNotFound(
+ group_uuid=uuids.group_hint
+ )
+ # This implicitly asserts that no exception is raised since
+ # uncaught exceptions would be treated as a test failure.
+ self.compute._validate_instance_group_policy(
+ self.context, instance, hints
+ )
+        # and this just asserts that we did in fact invoke the method
+        # that raises, to ensure that if we refactor in the future
+        # this test will fail if the function we mock is no longer called.
+ mock_get.assert_called_once_with(self.context, uuids.group_hint)
+
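This reproducer documents the intended behavior for bug 1890244: a stale scheduler hint that points at a deleted group must be tolerated rather than fatal. A sketch of the catch-and-continue shape being verified (the exception and lookup names are illustrative):

    class GroupNotFound(Exception):
        pass


    def validate_group_policy(lookup_group, check_policy, hints):
        for group_uuid in hints.get('group', []):
            try:
                group = lookup_group(group_uuid)
            except GroupNotFound:
                # The group was deleted after the hint was recorded;
                # there is no policy left to validate, so move on.
                continue
            check_policy(group)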
@mock.patch('nova.objects.InstanceGroup.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.objects.InstanceGroup.get_by_hint')
@@ -7669,6 +8137,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
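This test pins the failure path of the _build_resources context manager: when prepare_for_spawn raises, network preparation is cleaned up and failed_spawn_cleanup still runs before the error propagates. A generic sketch of a context manager that unwinds partial work on failure:

    import contextlib


    @contextlib.contextmanager
    def build_resources(prepare, cleanup):
        try:
            prepare()
            yield 'resources'
        except Exception:
            # Undo partial work before re-raising so the caller still
            # sees the original error.
            cleanup()
            raise


    def _boom():
        raise RuntimeError('resources unavailable')


    cleaned = []
    try:
        with build_resources(_boom, lambda: cleaned.append(True)):
            pass
    except RuntimeError:
        pass
    assert cleaned == [True]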
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
@@ -8082,10 +8586,11 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
ctxt, instance, req_networks)
warning_mock.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.utils.notify_about_instance_create')
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_launched_at_in_create_end_notification(self,
- mock_instance_update, mock_notify_instance_create):
+ mock_instance_update, mock_notify_instance_create, mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
@@ -8125,6 +8630,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.flags(default_access_ip_network_name='test1')
instance = fake_instance.fake_db_instance()
+ @mock.patch.object(self.compute.rt, 'instance_claim')
@mock.patch.object(db, 'instance_update_and_get_original',
return_value=({}, instance))
@mock.patch.object(self.compute.driver, 'spawn')
@@ -8133,7 +8639,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
def _check_access_ip(mock_notify, mock_extra, mock_networks,
- mock_spawn, mock_db_update):
+ mock_spawn, mock_db_update, mock_claim):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
@@ -8154,8 +8660,10 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
_check_access_ip()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_instance_update')
- def test_create_error_on_instance_delete(self, mock_instance_update):
+ def test_create_error_on_instance_delete(self, mock_instance_update,
+ mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.error':
@@ -8169,7 +8677,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save',
- side_effect=[None, None, None, exc]),
+ side_effect=[None, None, exc]),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_spawn, mock_networks, mock_save, mock_notify):
@@ -8198,7 +8706,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(
self.compute, '_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save'),
- ) as (mock_spawn, mock_networks, mock_save):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_claim):
self.compute._build_and_run_instance(
self.context,
self.instance, self.image, self.injected_files,
@@ -8229,11 +8738,17 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
# resource request and therefore no matching request group exists in
# the request spec.
self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(),
objects.InstancePCIRequest(
+ request_id=uuids.req0,
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
requester_id=uuids.port1,
spec=[{'vendor_id': '1377', 'product_id': '0047'}]),
- objects.InstancePCIRequest(requester_id=uuids.port2),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ requester_id=uuids.port2,
+ ),
])
with test.nested(
mock.patch.object(self.compute.driver, 'spawn'),
@@ -8242,7 +8757,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.instance, 'save'),
mock.patch('nova.scheduler.client.report.'
'SchedulerReportClient._get_resource_provider'),
- ) as (mock_spawn, mock_networks, mock_save, mock_get_rp):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_get_rp, mock_claim):
mock_get_rp.return_value = {
'uuid': uuids.rp1,
'name': 'compute1:sriov-agent:ens3'
@@ -8278,8 +8794,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = None
@@ -8301,8 +8822,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = {
@@ -8326,8 +8852,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1, uuids.rp2])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
self.assertRaises(
exception.BuildAbortException,
@@ -8563,11 +9094,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_test(get_by_instance_uuid,
- migration_save,
notify_usage_exists,
migrate_instance_start,
setup_networks_on_host,
@@ -8639,7 +9168,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_finish',
side_effect=_migrate_instance_finish)
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.instance, 'save')
@mock.patch.object(self.compute, '_set_instance_info')
@mock.patch.object(db, 'instance_fault_create')
@@ -8653,7 +9181,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
fault_create,
set_instance_info,
instance_save,
- migration_save,
setup_networks_on_host,
migrate_instance_finish,
get_instance_nw_info,
@@ -8697,11 +9224,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_revert_resize(mock_get_by_instance_uuid,
- mock_migration_save,
mock_extra_update,
mock_notify_usage_exists,
mock_migrate_instance_start,
@@ -8748,7 +9273,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(self.compute, "_set_instance_info")
@mock.patch.object(self.instance, 'save')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(db, 'instance_fault_create')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@@ -8772,7 +9296,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock_extra_update,
mock_fault_create,
mock_fault_from_exc,
- mock_mig_save,
mock_inst_save,
mock_set,
mock_notify_about_instance_action,
@@ -8866,7 +9389,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute, '_delete_scheduler_instance_info')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.Migration.get_by_id')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@@ -8875,7 +9397,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.instance, 'save')
def do_confirm_resize(mock_save, mock_drop, mock_delete,
mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save, mock_mig_get, mock_inst_get,
+ mock_mig_get, mock_inst_get,
mock_delete_scheduler_info):
self._mock_rt()
@@ -8958,16 +9480,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
instance_get_by_uuid.assert_called_once()
def test_confirm_resize_calls_virt_driver_with_old_pci(self):
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@mock.patch.object(self.compute, '_delete_allocation_after_move')
@mock.patch.object(self.instance, 'drop_migration_context')
@mock.patch.object(self.instance, 'save')
- def do_confirm_resize(mock_save, mock_drop, mock_delete,
- mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save):
+ def do_confirm_resize(
+ mock_save, mock_drop, mock_delete, mock_confirm, mock_nwapi,
+ mock_notify
+ ):
# Mock virt driver confirm_resize() to save the provided
# network_info, we will check it later.
updated_nw_info = []
@@ -8983,10 +9505,12 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self._mock_rt()
old_devs = objects.PciDeviceList(
objects=[objects.PciDevice(
+ compute_node_id=1,
address='0000:04:00.2',
request_id=uuids.pcidev1)])
new_devs = objects.PciDeviceList(
objects=[objects.PciDevice(
+ compute_node_id=2,
address='0000:05:00.3',
request_id=uuids.pcidev1)])
self.instance.migration_context = objects.MigrationContext(
@@ -9135,9 +9659,15 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual(driver_console.get_connection_info.return_value,
console)
+ @mock.patch('nova.utils.pass_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
- def _test_max_concurrent_live(self, mock_lm):
+ def _test_max_concurrent_live(self, mock_lm, mock_pass_context):
+ # pass_context wraps the function, which doesn't work with a mock
+ # So we simply mock it too
+ def _mock_pass_context(runner, func, *args, **kwargs):
+ return runner(func, *args, **kwargs)
+ mock_pass_context.side_effect = _mock_pass_context
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
@@ -9539,7 +10069,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual('error', self.migration.status)
mock_rollback_live_mig.assert_called_once_with(
self.context, self.instance, 'dest-host',
- migrate_data=migrate_data, source_bdms=source_bdms)
+ migrate_data=migrate_data, source_bdms=source_bdms,
+ pre_live_migration=True)
@mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
@mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
@@ -9574,7 +10105,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual('error', self.migration.status)
mock_rollback_live_mig.assert_called_once_with(
self.context, self.instance, 'dest-host',
- migrate_data=migrate_data, source_bdms=source_bdms)
+ migrate_data=migrate_data, source_bdms=source_bdms,
+ pre_live_migration=True)
@mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
@mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
@@ -9956,6 +10488,27 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.instance,
migration)
+ def test_post_live_migration_update_host(self):
+ @mock.patch.object(self.compute, '_get_compute_info')
+ def _test_post_live_migration(_get_compute_info):
+ dest_host = 'dest'
+ cn = objects.ComputeNode(hypervisor_hostname=dest_host)
+ _get_compute_info.return_value = cn
+ instance = fake_instance.fake_instance_obj(self.context,
+ node='src',
+ uuid=uuids.instance)
+ with mock.patch.object(self.compute, "_post_live_migration"
+ ) as plm, mock.patch.object(instance, "save") as save:
+ error = ValueError("some failure")
+ plm.side_effect = error
+ self.assertRaises(
+ ValueError, self.compute._post_live_migration_update_host,
+ self.context, instance, dest_host)
+ save.assert_called_once()
+ self.assertEqual(instance.host, dest_host)
+
+ _test_post_live_migration()
+
def test_post_live_migration_cinder_pre_344_api(self):
# Because live migration has
# succeeded,_post_live_migration_remove_source_vol_connections()
@@ -10420,19 +10973,34 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
action='live_migration_abort', phase='end')]
)
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(manager.ComputeManager, '_revert_allocation')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
@mock.patch.object(objects.Migration, 'get_by_id')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def test_live_migration_abort_queued(self, mock_notify_action,
- mock_get_migration, mock_notify):
+ mock_get_migration, mock_notify,
+ mock_revert_allocations,
+ mock_instance_save):
instance = objects.Instance(id=123, uuid=uuids.instance)
migration = self._get_migration(10, 'queued', 'live-migration')
+ migration.dest_compute = uuids.dest
+ migration.dest_node = uuids.dest
migration.save = mock.MagicMock()
mock_get_migration.return_value = migration
fake_future = mock.MagicMock()
self.compute._waiting_live_migrations[instance.uuid] = (
migration, fake_future)
- self.compute.live_migration_abort(self.context, instance, migration.id)
+ with mock.patch.object(
+ self.compute.network_api,
+ 'setup_networks_on_host') as mock_setup_net:
+ self.compute.live_migration_abort(
+ self.context, instance, migration.id)
+ mock_setup_net.assert_called_once_with(
+ self.context, instance, host=migration.dest_compute,
+ teardown=True)
+ mock_revert_allocations.assert_called_once_with(
+ self.context, instance, migration)
mock_notify.assert_has_calls(
[mock.call(self.context, instance,
'live.migration.abort.start'),
@@ -10772,7 +11340,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -10806,7 +11374,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -10940,40 +11508,94 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
_test()
def test__update_migrate_vifs_profile_with_pci(self):
- # Define two migrate vifs with only one pci that is required
- # to be updated. Make sure method under test updated the correct one
+ # Define three migrate vifs with two pci devs that are required
+        # to be updated: one VF and one PF.
+        # Make sure the method under test updates the correct devs with
+        # the correct values.
nw_vifs = network_model.NetworkInfo(
- [network_model.VIF(
- id=uuids.port0,
- vnic_type='direct',
- type=network_model.VIF_TYPE_HW_VEB,
- profile={'pci_slot': '0000:04:00.3',
- 'pci_vendor_info': '15b3:1018',
- 'physical_network': 'default'}),
- network_model.VIF(
- id=uuids.port1,
- vnic_type='normal',
- type=network_model.VIF_TYPE_OVS,
- profile={'some': 'attribute'})])
- pci_dev = objects.PciDevice(request_id=uuids.pci_req,
- address='0000:05:00.4',
- vendor_id='15b3',
- product_id='1018')
- port_id_to_pci_dev = {uuids.port0: pci_dev}
- mig_vifs = migrate_data_obj.VIFMigrateData.\
- create_skeleton_migrate_vifs(nw_vifs)
- self.compute._update_migrate_vifs_profile_with_pci(mig_vifs,
- port_id_to_pci_dev)
+ [
+ network_model.VIF(
+ id=uuids.port0,
+ vnic_type='direct',
+ type=network_model.VIF_TYPE_HW_VEB,
+ profile={
+ 'pci_slot': '0000:04:00.3',
+ 'pci_vendor_info': '15b3:1018',
+ 'physical_network': 'default',
+ },
+ ),
+ network_model.VIF(
+ id=uuids.port1,
+ vnic_type='normal',
+ type=network_model.VIF_TYPE_OVS,
+ profile={'some': 'attribute'},
+ ),
+ network_model.VIF(
+ id=uuids.port2,
+ vnic_type='direct-physical',
+ type=network_model.VIF_TYPE_HOSTDEV,
+ profile={
+ 'pci_slot': '0000:01:00',
+ 'pci_vendor_info': '8086:154d',
+ 'physical_network': 'physnet2',
+ },
+ ),
+ ]
+ )
+
+ pci_vf_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:05:00.4',
+ parent_addr='0000:05:00',
+ vendor_id='15b3',
+ product_id='1018',
+ compute_node_id=13,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ )
+ pci_pf_dev = objects.PciDevice(
+ request_id=uuids.pci_req2,
+ address='0000:01:00',
+ parent_addr='0000:02:00',
+ vendor_id='8086',
+ product_id='154d',
+ compute_node_id=13,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ port_id_to_pci_dev = {
+ uuids.port0: pci_vf_dev,
+ uuids.port2: pci_pf_dev,
+ }
+ mig_vifs = (
+ migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
+ nw_vifs)
+ )
+
+ self.compute._update_migrate_vifs_profile_with_pci(
+ mig_vifs, port_id_to_pci_dev)
+
# Make sure method under test updated the correct one.
- changed_mig_vif = mig_vifs[0]
+ changed_vf_mig_vif = mig_vifs[0]
unchanged_mig_vif = mig_vifs[1]
+ changed_pf_mig_vif = mig_vifs[2]
# Migrate vifs profile was updated with pci_dev.address
# for port ID uuids.port0.
- self.assertEqual(changed_mig_vif.profile['pci_slot'],
- pci_dev.address)
+ self.assertEqual(changed_vf_mig_vif.profile['pci_slot'],
+ pci_vf_dev.address)
+ # MAC is not added as this is a VF
+ self.assertNotIn('device_mac_address', changed_vf_mig_vif.profile)
# Migrate vifs profile was unchanged for port ID uuids.port1.
# i.e 'profile' attribute does not exist.
self.assertNotIn('profile', unchanged_mig_vif)
+ # Migrate vifs profile was updated with pci_dev.address
+ # for port ID uuids.port2.
+ self.assertEqual(changed_pf_mig_vif.profile['pci_slot'],
+ pci_pf_dev.address)
+ # MAC is updated as this is a PF
+ self.assertEqual(
+ 'b4:96:91:34:f4:36',
+ changed_pf_mig_vif.profile['device_mac_address']
+ )
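The expanded assertions encode the rule the updater has to follow: every matched port gets a fresh pci_slot from the destination device, but only a PF carries its MAC into the profile as device_mac_address (a VF's MAC is programmed by the network backend, not read from the device). A hedged sketch of that per-device branch:

    def update_vif_profile(profile, pci_dev):
        # pci_dev mirrors the objects.PciDevice fields used above;
        # 'type-PF' is the SRIOV_PF dev_type value.
        profile['pci_slot'] = pci_dev.address
        if pci_dev.dev_type == 'type-PF':
            # PFs expose a stable MAC that must travel with the migration.
            profile['device_mac_address'] = pci_dev.extra_info['mac_address']
        return profile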
def test_get_updated_nw_info_with_pci_mapping(self):
old_dev = objects.PciDevice(address='0000:04:00.2')
diff --git a/nova/tests/unit/compute/test_flavors.py b/nova/tests/unit/compute/test_flavors.py
index 82434a9473..ba0eabc77d 100644
--- a/nova/tests/unit/compute/test_flavors.py
+++ b/nova/tests/unit/compute/test_flavors.py
@@ -196,7 +196,7 @@ class TestCreateFlavor(test.TestCase):
def test_rxtx_factor_must_be_within_sql_float_range(self):
# We do * 10 since this is an approximation and we need to make sure
- # the difference is noticeble.
+ # the difference is noticeable.
over_rxtx_factor = db_const.SQL_SP_FLOAT_MAX * 10
self.assertInvalidInput('flavor1', 64, 1, 120,
diff --git a/nova/tests/unit/compute/test_host_api.py b/nova/tests/unit/compute/test_host_api.py
index e4c310deb0..7f9e862057 100644
--- a/nova/tests/unit/compute/test_host_api.py
+++ b/nova/tests/unit/compute/test_host_api.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import oslo_messaging as messaging
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/compute/test_instance_list.py b/nova/tests/unit/compute/test_instance_list.py
index e6e195e9cc..6544ddc801 100644
--- a/nova/tests/unit/compute/test_instance_list.py
+++ b/nova/tests/unit/compute/test_instance_list.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
index 7860f3d529..8822cb4522 100644
--- a/nova/tests/unit/compute/test_keypairs.py
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -14,13 +14,16 @@
# under the License.
"""Tests for keypair API."""
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
+from oslo_limit import fixture as limit_fixture
from nova.compute import api as compute_api
from nova import context
from nova import exception
+from nova.limit import local as local_limit
from nova.objects import keypair as keypair_obj
from nova import quota
from nova.tests.unit.compute import test_compute
@@ -119,25 +122,7 @@ class CreateImportSharedTestMixIn(object):
exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
name, *args)
- self.assertEqual(expected_message, str(exc))
-
- def assertInvalidKeypair(self, expected_message, name):
- msg = 'Keypair data is invalid: %s' % expected_message
- self.assertKeypairRaises(exception.InvalidKeypair, msg, name)
-
- def test_name_too_short(self):
- msg = ('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, '')
-
- def test_name_too_long(self):
- msg = ('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, 'x' * 256)
-
- def test_invalid_chars(self):
- msg = "Keypair name contains unsafe characters"
- self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
+ self.assertIn(expected_message, str(exc))
def test_already_exists(self):
def db_key_pair_create_duplicate(context, keypair):
@@ -155,9 +140,51 @@ class CreateImportSharedTestMixIn(object):
return_value={'user': {
'key_pairs': CONF.quota.key_pairs}})
def test_quota_limit(self, mock_count_as_dict):
- msg = "Maximum number of key pairs exceeded"
+ msg = "Quota exceeded, too many key pairs."
self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
+ def _test_quota_during_recheck(self, mock_method, msg):
+ # Skip for import key pair due to bug 1959732.
+ if self.func_name == 'import_key_pair':
+ self.skipTest('bug/1959732: import_key_pair missing quota recheck')
+
+ self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
+ self.assertEqual(2, mock_method.call_count)
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_quota_during_recheck(self, mock_check):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ # First quota check succeeds, second (recheck) fails.
+ mock_check.side_effect = [None,
+ exception.OverQuota(overs='key_pairs')]
+ msg = "Quota exceeded, too many key pairs."
+ self._test_quota_during_recheck(mock_check, msg)
+
+ def test_quota_unified_limits(self):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 0}, {}))
+ msg = ("Resource %s is over limit" % local_limit.KEY_PAIRS)
+ self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_quota_during_recheck_unified_limits(self, mock_enforce):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 100}, {}))
+ # First quota check succeeds, second (recheck) fails.
+ mock_enforce.side_effect = [
+ None, exception.KeypairLimitExceeded('oslo.limit message')]
+ msg = 'oslo.limit message'
+ self._test_quota_during_recheck(mock_enforce, msg)
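These recheck tests exercise a classic check-then-act race: quota is verified, the keypair is created, then quota is verified again so that parallel requests which slipped past the first check are rolled back; with [quota]recheck_quota=False the second check is skipped. A minimal sketch of the two-phase pattern (names illustrative):

    def create_keypair(check_quota, create, delete, recheck_enabled=True):
        check_quota()          # first check: fail fast before doing work
        record = create()
        if recheck_enabled:
            try:
                check_quota()  # recheck: catch a parallel request that won
            except Exception:
                delete(record)  # roll back our own creation
                raise
        return record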
+
class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
func_name = 'create_key_pair'
@@ -192,6 +219,27 @@ class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
self.assertRaises(processutils.ProcessExecutionError,
self._check_success)
+ def test_success_unified_limits(self):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_quota_recheck_disabled(self, mock_check):
+ self.flags(recheck_quota=False, group="quota")
+ self._check_success()
+ self.assertEqual(1, mock_check.call_count)
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_quota_recheck_disabled_unified_limits(self, mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.flags(recheck_quota=False, group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+ self.assertEqual(1, mock_enforce.call_count)
+
class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
func_name = 'import_key_pair'
@@ -240,6 +288,27 @@ class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
msg = u'Keypair data is invalid: failed to generate fingerprint'
self.assertEqual(msg, str(exc))
+ def test_success_unified_limits(self):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_quota_recheck_disabled(self, mock_check):
+ self.flags(recheck_quota=False, group="quota")
+ self._check_success()
+ self.assertEqual(1, mock_check.call_count)
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_quota_recheck_disabled_unified_limits(self, mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.flags(recheck_quota=False, group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+ self.assertEqual(1, mock_enforce.call_count)
+
class GetKeypairTestCase(KeypairAPITestCase):
def test_success(self):
diff --git a/nova/tests/unit/compute/test_multi_cell_list.py b/nova/tests/unit/compute/test_multi_cell_list.py
index 6bb67a76b8..5906f69de2 100644
--- a/nova/tests/unit/compute/test_multi_cell_list.py
+++ b/nova/tests/unit/compute/test_multi_cell_list.py
@@ -13,7 +13,8 @@
from contextlib import contextmanager
import copy
import datetime
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import multi_cell_list
diff --git a/nova/tests/unit/compute/test_pci_placement_translator.py b/nova/tests/unit/compute/test_pci_placement_translator.py
new file mode 100644
index 0000000000..0592186e54
--- /dev/null
+++ b/nova/tests/unit/compute/test_pci_placement_translator.py
@@ -0,0 +1,291 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import ddt
+from oslo_utils.fixture import uuidsentinel as uuids
+from unittest import mock
+
+from nova.compute import pci_placement_translator as ppt
+from nova.compute import provider_tree
+from nova import exception
+from nova.objects import fields
+from nova.objects import pci_device
+from nova.pci import devspec
+from nova import test
+
+
+def dev(v, p):
+ return pci_device.PciDevice(vendor_id=v, product_id=p)
+
+
+# NOTE(gibi): Most of the nova.compute.pci_placement_translator module is
+# covered with functional tests in
+# nova.tests.functional.libvirt.test_pci_in_placement
+@ddt.ddt
+class TestTranslator(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ patcher = mock.patch(
+ "nova.compute.pci_placement_translator."
+ "_is_placement_tracking_enabled")
+ self.addCleanup(patcher.stop)
+ patcher.start()
+
+ def test_translator_skips_devices_without_matching_spec(self):
+ """As every PCI device in the PciTracker is created by matching a
+        PciDeviceSpec, the translator should always be able to look up the
+        spec for a device. If it cannot, the device will be skipped and a
+        warning will be emitted.
+ """
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = pci_device.PciDeviceList(
+ objects=[
+ pci_device.PciDevice(
+ address="0000:81:00.0",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ instance_uuid=None,
+ )
+ ]
+ )
+ # So we have a device but there is no spec for it
+ pci_tracker.dev_filter.get_devspec = mock.Mock(return_value=None)
+ pci_tracker.dev_filter.specs = []
+        # we expect that the provider_tree is not touched, as the device
+        # without a spec is skipped; we assert that with the NonCallableMock
+ provider_tree = mock.NonCallableMock()
+
+ ppt.update_provider_tree_for_pci(
+ provider_tree, "fake-node", pci_tracker, {}, [])
+
+ self.assertIn(
+ "Device spec is not found for device 0000:81:00.0 in "
+ "[pci]device_spec. Ignoring device in Placement resource view. "
+ "This should not happen. Please file a bug.",
+ self.stdlog.logger.output
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (None, set()),
+ ("", set()),
+ ("a", {"CUSTOM_A"}),
+ ("a,b", {"CUSTOM_A", "CUSTOM_B"}),
+ ("HW_GPU_API_VULKAN", {"HW_GPU_API_VULKAN"}),
+ ("CUSTOM_FOO", {"CUSTOM_FOO"}),
+ ("custom_bar", {"CUSTOM_BAR"}),
+ ("custom-bar", {"CUSTOM_CUSTOM_BAR"}),
+ ("CUSTOM_a", {"CUSTOM_A"}),
+ ("a@!#$b123X", {"CUSTOM_A_B123X"}),
+ # Note that both trait names are normalized to the same trait
+ ("a!@b,a###b", {"CUSTOM_A_B"}),
+ )
+ def test_trait_normalization(self, trait_names, expected_traits):
+ self.assertEqual(
+ expected_traits,
+ ppt.get_traits(trait_names)
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (dev(v='1234', p='5678'), None, "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "", "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "PGPU", "PGPU"),
+ (dev(v='1234', p='5678'), "pgpu", "PGPU"),
+ (dev(v='1234', p='5678'), "foobar", "CUSTOM_FOOBAR"),
+ (dev(v='1234', p='5678'), "custom_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom-foo", "CUSTOM_CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "a###b", "CUSTOM_A_B"),
+ (dev(v='123a', p='567b'), "", "CUSTOM_PCI_123A_567B"),
+ )
+ def test_resource_class_normalization(self, pci_dev, rc_name, expected_rc):
+ self.assertEqual(
+ expected_rc,
+ ppt.get_resource_class(
+ rc_name, pci_dev.vendor_id, pci_dev.product_id
+ ),
+ )
+
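The two data-driven tests above encode the normalization rules: names are upper-cased; anything that is not a known standard trait or resource class, and is not already CUSTOM_-prefixed, gets a CUSTOM_ prefix; every run of characters outside [A-Z0-9_] collapses to a single underscore; and an empty resource class falls back to CUSTOM_PCI_<vendor>_<product>. A sketch that reproduces the expected values (the known-name sets below stand in for the os-traits and os-resource-classes catalogs; this is inferred from the test data, not copied from the module):

import re

KNOWN_TRAITS = {"HW_GPU_API_VULKAN"}  # stand-in for the os-traits catalog
KNOWN_RCS = {"PGPU"}                  # stand-in for os-resource-classes

def _customize(name, known):
    # Upper-case first; add CUSTOM_ unless the name is standard or
    # already prefixed; then collapse runs of other characters to '_'.
    upper = name.upper()
    if upper not in known and not upper.startswith("CUSTOM_"):
        upper = "CUSTOM_" + upper
    return re.sub(r"[^A-Z0-9_]+", "_", upper)

def get_traits(trait_string):
    return {_customize(t, KNOWN_TRAITS)
            for t in (trait_string or "").split(",") if t}

def get_resource_class(rc_name, vendor_id, product_id):
    if not rc_name:
        return "CUSTOM_PCI_%s_%s" % (vendor_id.upper(), product_id.upper())
    return _customize(rc_name, KNOWN_RCS)
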
+ def test_dependent_device_pf_then_vf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(pf, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ vf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_dependent_device_vf_then_pf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf2 = pci_device.PciDevice(
+ address="0000:81:00.2",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(vf, {"resource_class": "foo"})
+ pv._add_dev(vf2, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ pf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.0 and 0000:81:00.1,0000:81:00.2 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
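Both orderings above assert the same invariant: [pci]device_spec may configure a PF or its child VFs, never both. A minimal sketch of the dependency check (device objects are assumed to expose address and parent_addr; this is not the PlacementView internals):

def assert_pf_vf_exclusive(configured_devs, new_dev):
    for dev in configured_devs:
        if (new_dev.parent_addr == dev.address or    # new VF under a PF
                dev.parent_addr == new_dev.address):  # new PF over VFs
            raise ValueError(
                "Configuring both %s and %s is not supported. Either "
                "the parent PF or its children VFs can be configured."
                % (new_dev.address, dev.address))
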
+ def test_mixed_rc_for_sibling_vfs(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf1, vf2, vf3, vf4 = [
+ pci_device.PciDevice(
+ address="0000:81:00.%d" % f,
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ for f in range(0, 4)
+ ]
+
+ pv._add_dev(vf1, {"resource_class": "a", "traits": "foo,bar,baz"})
+ # order is irrelevant
+ pv._add_dev(vf2, {"resource_class": "a", "traits": "foo,baz,bar"})
+ # but a missing trait is rejected
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf3,
+ {"resource_class": "a", "traits": "foo,bar"},
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_FOO for "
+ "0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO "
+ "for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+ # as is an additional trait
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf4,
+ {"resource_class": "a", "traits": "foo,bar,baz,extra"}
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_EXTRA,"
+ "CUSTOM_FOO for 0000:81:00.3 and COMPUTE_MANAGED_PCI_DEVICE,"
+ "CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+
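The sibling-VF checks reduce to a set-equality rule: the first VF of a PF fixes the normalized trait set, and every later sibling must match it exactly, with missing and extra traits rejected alike. Schematically, under assumed names:

def check_sibling_traits(traits_per_pf, parent_addr, new_traits):
    prior = traits_per_pf.setdefault(parent_addr, new_traits)
    if prior != new_traits:
        raise ValueError(
            "VFs from the same PF cannot be configured with different "
            "sets of traits: got %s, expected %s"
            % (sorted(new_traits), sorted(prior)))
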
+ def test_translator_maps_pci_device_to_rp(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+
+ pv._add_dev(vf, {})
+ pv._add_dev(pf, {})
+ pv.update_provider_tree(pt)
+
+ self.assertEqual(
+ pt.data("fake-node_0000:71:00.0").uuid, vf.extra_info["rp_uuid"]
+ )
+ self.assertEqual(
+ pt.data("fake-node_0000:72:00.0").uuid, pf.extra_info["rp_uuid"]
+ )
+
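The two lookups above imply the provider naming scheme: each PCI resource provider is named <hypervisor nodename>_<address>, and a VF is reported on its parent PF's provider rather than getting one of its own. A sketch of the rule the assertions pin down ("type-VF" being the value of fields.PciDeviceType.SRIOV_VF):

def rp_name(nodename, dev):
    # VFs roll up to their parent PF's provider; other device types
    # get a provider keyed by their own address.
    addr = dev.parent_addr if dev.dev_type == "type-VF" else dev.address
    return "%s_%s" % (nodename, addr)
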
+ def test_update_provider_tree_for_pci_update_pools(self):
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ )
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = [pf]
+ pci_tracker.dev_filter.specs = [devspec.PciDeviceSpec({})]
+
+ ppt.update_provider_tree_for_pci(pt, 'fake-node', pci_tracker, {}, [])
+
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices.\
+ assert_called_once_with()
diff --git a/nova/tests/unit/compute/test_provider_config.py b/nova/tests/unit/compute/test_provider_config.py
index b9070bd218..384d465054 100644
--- a/nova/tests/unit/compute/test_provider_config.py
+++ b/nova/tests/unit/compute/test_provider_config.py
@@ -13,13 +13,14 @@
import copy
import ddt
import fixtures
+import importlib.metadata
import microversion_parse
import os
-
from unittest import mock
from oslo_utils.fixture import uuidsentinel
from oslotest import base
+from packaging import version
from nova.compute import provider_config
from nova import exception as nova_exc
@@ -118,6 +119,17 @@ class SchemaValidationTestCasesV1(SchemaValidationMixin):
@ddt.unpack
@ddt.file_data('provider_config_data/v1/validation_error_test_data.yaml')
def test_validation_errors(self, config, expected_messages):
+ # TODO(stephenfin): Drop this once we no longer support jsonschema 3.x
+ jsonschema_version = importlib.metadata.version('jsonschema')
+ if version.parse(jsonschema_version) < version.parse('4.0.0'):
+ if expected_messages == [
+ "should not be valid under {}",
+ "validating 'not' in schema['properties']['__source_file']",
+ ]:
+ expected_messages = [
+ "{} is not allowed for",
+ "validating 'not' in schema['properties']['__source_file']", # noqa: E501
+ ]
self.run_test_validation_errors(config, expected_messages)
@ddt.unpack
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index 51545df59f..919dcb8334 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -12,12 +12,14 @@
import copy
import datetime
+import ddt
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
import os_resource_classes as orc
import os_traits
from oslo_config import cfg
+from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import units
@@ -62,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
+ 'uuid': uuids.cn1,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
+ deleted=False,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -177,6 +181,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([1]), set([2])],
pinned_cpus=set()),
@@ -187,6 +192,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([3]), set([4])],
pinned_cpus=set())]),
@@ -584,7 +590,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
@@ -617,7 +623,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
@@ -641,8 +647,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'flavor',
'migration_context',
'resources'])
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
- _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
@@ -669,7 +674,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -728,7 +733,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
@@ -745,7 +750,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
@@ -770,7 +775,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
@@ -795,7 +800,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
@@ -821,7 +826,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -861,7 +866,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
@@ -887,7 +892,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -924,7 +929,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -950,7 +955,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -985,7 +990,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1011,7 +1016,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1054,7 +1059,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
@@ -1082,7 +1087,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1119,7 +1124,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1145,7 +1150,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1197,7 +1202,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1238,7 +1243,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1271,7 +1276,7 @@ class TestInitComputeNode(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
@@ -1294,14 +1299,14 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
- def fake_get_node(_ctx, host, node):
+ def fake_get_node(_ctx, uuid):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
@@ -1311,85 +1316,67 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.cn1)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
- pci_mock, get_by_hypervisor_mock):
+ pci_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
- def fake_get_all(_ctx, nodename):
- return [cn]
+ def fake_get_node(_ctx, uuid):
+ return cn
- get_mock.side_effect = exc.NotFound
- get_by_hypervisor_mock.side_effect = fake_get_all
+ get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
- self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock)
+ create_mock):
+ self._test_compute_node_created(update_mock, get_mock, create_mock)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@@ -1450,13 +1437,9 @@ class TestInitComputeNode(BaseTestCase):
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- if rebalances_nodes:
- get_by_hypervisor_mock.assert_called_once_with(
- mock.sentinel.ctx, _NODENAME)
- else:
- get_by_hypervisor_mock.assert_not_called()
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.compute_node_uuid)
+
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
@@ -1464,7 +1447,7 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
@@ -1487,14 +1470,14 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
- ctxt, _HOSTNAME, _NODENAME)] * 2)
+ ctxt, uuids.cn_uuid)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
@@ -1510,7 +1493,83 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'fake-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Host is the same, no _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_not_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node_move_host(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'old-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Our host changed, so we should have the updated value and have
+ # called _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_called()
+
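The two undelete tests describe the recovery path for a soft-deleted compute node row: the tracker revives the record unconditionally, but only pushes an update when the node has moved to a new host. A condensed sketch of that logic, with stand-in names:

def revive_node(node, this_host, update):
    if node.deleted:
        node.deleted = False
        node.deleted_at = None
        node.save()
    if node.host != this_host:
        node.host = this_host  # the node was rebalanced to this host
        update(node)           # so the change must be pushed out
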
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
+@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@@ -1577,9 +1636,14 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
+ @mock.patch('nova.objects.ComputeNode.save', new=mock.Mock())
+ @mock.patch(
+ 'nova.pci.stats.PciDeviceStats.has_remote_managed_device_pools',
+ return_value=True)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
- def test_existing_node_capabilities_as_traits(self, mock_sync_disabled):
+ def test_existing_node_capabilities_as_traits(
+ self, mock_sync_disabled, mock_has_remote_managed_device_pools):
"""The capabilities_as_traits() driver method returns traits
information for a node/provider.
"""
@@ -1587,6 +1651,15 @@ class TestUpdateComputeNode(BaseTestCase):
rc = self.rt.reportclient
rc.set_traits_for_provider = mock.MagicMock()
+ # TODO(dmitriis): Remove once the PCI tracker is always created
+ # upon the resource tracker initialization.
+ with mock.patch.object(
+ objects.PciDeviceList, 'get_by_compute_node',
+ return_value=objects.PciDeviceList()
+ ):
+ self.rt.pci_tracker = pci_manager.PciDevTracker(
+ mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
+
# Emulate a driver that has implemented the update_from_provider_tree()
# virt driver method
self.driver_mock.update_provider_tree = mock.Mock()
@@ -1694,12 +1767,18 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(exp_inv, ptree.data(new_compute.uuid).inventory)
mock_sync_disabled.assert_called_once()
+ @ddt.data(
+ exc.ResourceProviderUpdateConflict(
+ uuid='uuid', generation=42, error='error'),
+ exc.PlacementReshapeConflict(error='error'),
+ )
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_resource_change', return_value=False)
- def test_update_retry_success(self, mock_resource_change,
- mock_sync_disabled):
+ def test_update_retry_success(
+ self, exc, mock_resource_change, mock_sync_disabled
+ ):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
@@ -1713,9 +1792,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.driver_mock.update_provider_tree.side_effect = lambda *a: None
ufpt_mock = self.rt.reportclient.update_from_provider_tree
- ufpt_mock.side_effect = (
- exc.ResourceProviderUpdateConflict(
- uuid='uuid', generation=42, error='error'), None)
+ ufpt_mock.side_effect = (exc, None)
self.rt._update(mock.sentinel.ctx, new_compute)
@@ -1753,7 +1830,221 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
- self.assertEqual(1, mock_resource_change.call_count)
+ self.assertEqual(0, mock_resource_change.call_count)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting(self, mock_update_provider_tree_for_pci):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call did not change any allocations so
+ update_from_provider_tree called without triggering reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_reshape(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call changed allocations so
+ update_from_provider_tree called with allocations to trigger reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting changed some allocations
+ mock_update_provider_tree_for_pci.return_value = True
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
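Together with the previous test, this pins the reshape contract: allocations are always fetched and handed to the PCI translator, but forwarded to update_from_provider_tree only when the translator reports that it moved allocations. A condensed sketch mirroring the asserted calls (not the tracker's real method):

def update_to_placement_pci(reportclient, ctx, ptree, nodename,
                            pci_tracker, same_host_resizes):
    allocs = reportclient.get_allocations_for_provider_tree(ctx, nodename)
    moved = update_provider_tree_for_pci(
        ptree, nodename, pci_tracker, allocs, same_host_resizes)
    # passing allocations instead of None triggers a reshape
    reportclient.update_from_provider_tree(
        ctx, ptree, allocations=allocs if moved else None)
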
+ @ddt.data(True, False)
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_driver_reshape(
+ self, pci_reshape, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker first called the
+ driver.update_provider_tree and that needed reshape so the allocations
+ are pulled. Then independently of update_provider_tree_for_pci the
+ update_from_provider_tree is called with the allocations to trigger
+ reshape in placement
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that the driver requests reshape
+ self.driver_mock.update_provider_tree.side_effect = [
+ exc.ReshapeNeeded, None]
+ mock_update_provider_tree_for_pci.return_value = pci_reshape
+
+ self.rt._update(mock.sentinel.ctx, compute_obj, startup=True)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_same_host_resize(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and with the list of instances that are being resized to the same
+ host.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+ self.rt.tracked_migrations = {
+ uuids.inst1: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst1,
+ ),
+ uuids.inst2: objects.Migration(
+ migration_type="evacuation",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst2,
+ ),
+ uuids.inst3: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node1",
+ dest_node="fake-node2",
+ instance_uuid=uuids.inst3,
+ ),
+ }
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [uuids.inst1],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
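Only uuids.inst1 reaches the translator above: the tracker forwards just the instances whose migration is a resize with identical source and destination nodes, excluding evacuations and cross-node resizes. A sketch of that filter:

def same_host_resize_instances(tracked_migrations):
    # only resizes that stay on the same node qualify
    return [
        uuid for uuid, mig in tracked_migrations.items()
        if mig.migration_type == "resize"
        and mig.source_node == mig.dest_node
    ]
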
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ def test_update_pci_reporting_allocation_in_use_error_propagated(self):
+ """Assert that if the pci placement reporting code tries to remove
+ inventory with allocation from placement due to invalid hypervisor
+ or [pci]device_spec reconfiguration then the InventoryInUse error from
+ placement is propagated and makes the compute startup fail.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ self.rt.reportclient.update_from_provider_tree.side_effect = (
+ exc.InventoryInUse(
+ resource_class="FOO", resource_provider="bar"))
+
+ self.assertRaises(
+ exc.PlacementPciException,
+ self.rt._update,
+ mock.sentinel.ctx,
+ compute_obj,
+ startup=True,
+ )
@mock.patch('nova.objects.Service.get_by_compute_host',
return_value=objects.Service(disabled=True))
@@ -1807,6 +2098,10 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_update_to_placement',
+ new=mock.Mock())
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
@@ -1938,14 +2233,19 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_nodes = {}
self.assertTrue(self.rt.disabled(_NODENAME))
- with mock.patch.object(self.instance, 'save'):
- claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
- _NODENAME, self.allocations, None)
+ # Reset all changes to the instance to make sure that we can detect
+ # any manipulation after the failure.
+ self.instance.obj_reset_changes(recursive=True)
- self.assertEqual(self.rt.host, self.instance.host)
- self.assertEqual(self.rt.host, self.instance.launched_on)
- self.assertEqual(_NODENAME, self.instance.node)
- self.assertIsInstance(claim, claims.NopClaim)
+ with mock.patch.object(self.instance, 'save') as mock_save:
+ self.assertRaises(exc.ComputeResourcesUnavailable,
+ self.rt.instance_claim,
+ mock.sentinel.ctx, self.instance,
+ _NODENAME, self.allocations, None)
+ mock_save.assert_not_called()
+
+ # Make sure the instance was not touched by the failed claim process
+ self.assertEqual(set(), self.instance.obj_what_changed())
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@@ -2111,26 +2411,45 @@ class TestInstanceClaim(BaseTestCase):
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
+ self.flags(
+ group="pci",
+ device_spec=[
+ jsonutils.dumps({"vendor_id": "0001", "product_id": "0002"})
+ ],
+ )
+ pci_dev = pci_device.PciDevice.create(
+ None,
+ dev_dict={
+ "compute_node_id": 1,
+ "address": "0000:81:00.0",
+ "product_id": "0002",
+ "vendor_id": "0001",
+ "numa_node": 0,
+ "dev_type": obj_fields.PciDeviceType.STANDARD,
+ "status": obj_fields.PciDeviceStatus.AVAILABLE,
+ "parent_addr": None,
+ },
+ )
+
+ pci_dev.instance_uuid = None
+ pci_devs = [pci_dev]
+
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
with mock.patch.object(
objects.PciDeviceList, 'get_by_compute_node',
- return_value=objects.PciDeviceList()
+ return_value=objects.PciDeviceList(objects=pci_devs)
):
self.rt.pci_tracker = pci_manager.PciDevTracker(
mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
- pci_dev = pci_device.PciDevice.create(
- None, fake_pci_device.dev_dict)
- pci_devs = [pci_dev]
- self.rt.pci_tracker.pci_devs = objects.PciDeviceList(objects=pci_devs)
-
request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ spec=[{'vendor_id': '0001', 'product_id': '0002'}])
pci_requests = objects.InstancePCIRequests(
requests=[request],
instance_uuid=self.instance.uuid)
self.instance.pci_requests = pci_requests
+ self.instance.pci_devices = objects.PciDeviceList()
check_bfv_mock.return_value = False
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@@ -2142,7 +2461,20 @@ class TestInstanceClaim(BaseTestCase):
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
- 'pci_device_pools': objects.PciDevicePoolList(),
+ 'pci_device_pools': objects.PciDevicePoolList(
+ objects=[
+ objects.PciDevicePool(
+ vendor_id='0001',
+ product_id='0002',
+ numa_node=0,
+ tags={
+ 'dev_type': 'type-PCI',
+ 'address': '0000:81:00.0'
+ },
+ count=0
+ )
+ ]
+ ),
'stats': {
'io_workload': 0,
'num_instances': 1,
@@ -2159,7 +2491,8 @@ class TestInstanceClaim(BaseTestCase):
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
- pci_stats_mock.assert_called_once_with([request])
+ pci_stats_mock.assert_called_once_with(
+ [request], provider_mapping=None)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -2363,7 +2696,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2467,7 +2800,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
return_value=_COMPUTE_NODE_FIXTURES[0])
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
return_value=[])
@@ -2639,7 +2972,7 @@ class TestResize(BaseTestCase):
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2809,7 +3142,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2941,7 +3274,7 @@ class TestRebuild(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -4046,7 +4379,7 @@ class ProviderConfigTestCases(BaseTestCase):
# add the same trait in p_tree and provider config
# for testing ignoring CUSTOM trait code logic.
- # If a programmer accidently forgets to ignore (substract)
+ # If a programmer accidentally forgets to ignore (subtract)
# existing custom traits, this test case will fail as we only expect
# "EXCEPTION_TRAIT" showed in ValueError exception rather than
# "EXCEPTION_TRAIT,CUSTOM_IGNORE_TRAIT"
@@ -4056,7 +4389,7 @@ class ProviderConfigTestCases(BaseTestCase):
expected = ("Provider config 'test_provider_config.yaml' attempts to "
"define a trait that is owned by the virt driver or "
- "specified via the placment api. Invalid traits '" +
+ "specified via the placement api. Invalid traits '" +
ex_trait + "' must be removed from "
"'test_provider_config.yaml'.")
@@ -4192,9 +4525,9 @@ class TestCleanComputeNodeCache(BaseTestCase):
invalid_nodename = "invalid-node"
self.rt.compute_nodes[_NODENAME] = self.compute
self.rt.compute_nodes[invalid_nodename] = mock.sentinel.compute
- with mock.patch.object(
- self.rt.reportclient, "invalidate_resource_provider",
- ) as mock_invalidate:
- self.rt.clean_compute_node_cache([self.compute])
- mock_remove.assert_called_once_with(invalid_nodename)
- mock_invalidate.assert_called_once_with(invalid_nodename)
+ mock_invalidate = self.rt.reportclient.invalidate_resource_provider
+
+ self.rt.clean_compute_node_cache([self.compute])
+
+ mock_remove.assert_called_once_with(invalid_nodename)
+ mock_invalidate.assert_called_once_with(invalid_nodename)
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index f062d5f45e..6f78678a92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -16,7 +16,8 @@
Unit Tests for nova.compute.rpcapi
"""
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -834,7 +835,9 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
- limits=None, request_spec=None, accel_uuids=[], version='6.0')
+ limits=None, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None,
+ version='6.2')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -861,20 +864,95 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'migration': None,
'limits': None
}
+ # Pass reimage_boot_volume to the client call...
compute_api.rebuild_instance(
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
- node=None, host=None, **rebuild_args)
+ node=None, host=None, reimage_boot_volume=False,
+ target_state=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2'),
+ mock.call('6.1'),
+ mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
+ # ...and assert that it does not show up on the wire before 6.1
mock_cctx.cast.assert_called_with( # No accel_uuids
ctxt, 'rebuild_instance',
instance=self.fake_instance_obj,
scheduled_node=None, **rebuild_args)
+ def test_rebuild_instance_vol_backed_old_rpcapi(self):
+ # With rpcapi < 6.1, if reimage_boot_volume is True then we
+ # should raise error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to [False, False, True, True], so that
+ # the 6.0 version is used.
+ mock_client.can_send_version.side_effect = [False, False, True, True]
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': None,
+ }
+ self.assertRaises(
+ exception.NovaException, compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2')])
+
+ def test_rebuild_instance_evacuate_old_rpcapi(self):
+ # With rpcapi < 6.2, if evacuate we should raise error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to return False.
+ mock_client.can_send_version.return_value = False
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': 'stopped',
+ }
+ self.assertRaises(
+ exception.UnsupportedRPCVersion,
+ compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+
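Taken together, the three rebuild tests pin the negotiation ladder: the client probes 6.2, 6.1, 6.0 and then 5.12, silently drops arguments a pre-5.12 peer cannot understand, and refuses to downgrade away a semantically required argument. A rough sketch of the cascade (exception types simplified; not the rpcapi code):

def negotiate_rebuild_version(client, kwargs):
    if client.can_send_version('6.2'):
        return '6.2', kwargs
    if kwargs.get('target_state') is not None:
        raise RuntimeError('evacuate to a target state needs RPC >= 6.2')
    if client.can_send_version('6.1'):
        return '6.1', kwargs
    if kwargs.get('reimage_boot_volume'):
        raise RuntimeError('reimage_boot_volume needs RPC >= 6.1')
    kwargs.pop('reimage_boot_volume', None)
    kwargs.pop('target_state', None)
    if client.can_send_version('6.0'):
        return '6.0', kwargs
    if client.can_send_version('5.12'):
        return '5.12', kwargs
    kwargs.pop('accel_uuids', None)  # pre-5.12 peers don't know it
    return '5.0', kwargs
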
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
self._test_compute_api('reserve_block_device_name', 'call',
@@ -1237,7 +1315,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_version_cap_all_cells_no_access(self, mock_allcells, mock_minver,
mock_log_error):
"""Tests a scenario where nova-compute is configured with a connection
- to the API database and fails trying to get the minium nova-compute
+ to the API database and fails trying to get the minimum nova-compute
service version across all cells because nova-compute is configured to
not allow direct database access.
"""
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index a50b4ca4de..62321bddec 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import eventlet
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -24,6 +25,7 @@ from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
+from nova import context
from nova.db.main import api as db
from nova import exception
from nova.network import neutron as neutron_api
@@ -207,6 +209,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
+ @mock.patch.object(neutron_api.API, 'unbind_ports')
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
@@ -223,7 +226,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate, mock_get_bdms,
- mock_event, clean_shutdown=True):
+ mock_event, mock_unbind_ports, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
@@ -276,10 +279,13 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance.uuid,
graceful_exit=False)
+ mock_unbind_ports.assert_called_once_with(
+ self.context, mock.ANY, detach=False)
+
return instance
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock())
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@@ -629,7 +635,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request(
self, mock_update_pci, mock_setup_network):
requested_res = [objects.RequestGroup(
@@ -640,7 +646,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(
self.context, instance, image=None,
- filter_properties={}, node='fake-node', request_spec=request_spec,
+ filter_properties={}, node='fakenode2', request_spec=request_spec,
accel_uuids=[])
mock_update_pci.assert_called_once_with(
@@ -653,7 +659,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host',
new=mock.NonCallableMock())
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request_update_raises(
self, mock_update_pci):
requested_res = [objects.RequestGroup(
@@ -694,7 +700,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertRaises(test.TestingException,
self.compute.unshelve_instance, self.context, instance,
image=shelved_image, filter_properties={},
- node='fake-node', request_spec=fake_spec, accel_uuids=[])
+ node='fakenode2', request_spec=fake_spec, accel_uuids=[])
self.assertEqual(instance.image_ref, initial_image_ref)
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -849,9 +855,67 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
exclude_states = set()
return vm_state - exclude_states
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'aggregate_add_host')
+ @mock.patch('nova.availability_zones.get_availability_zones')
+ def _create_host_inside_az(
+ self,
+ ctxt,
+ host,
+ az,
+ mock_az,
+ mock_aggregate,
+ ):
+
+ self.api = compute_api.AggregateAPI()
+ mock_az.return_value = [az]
+
+ cells = objects.CellMappingList.get_all(ctxt)
+ cell = cells[0]
+ with context.target_cell(ctxt, cell) as cctxt:
+ s = objects.Service(context=cctxt,
+ host=host,
+ binary='nova-compute',
+ topic='compute',
+ report_count=0)
+ s.create()
+
+ hm = objects.HostMapping(context=ctxt,
+ cell_mapping=cell,
+ host=host)
+ hm.create()
+
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ az, host)
+
+ def _create_request_spec_for_initial_az(self, az):
+ fake_spec = objects.RequestSpec()
+ fake_spec.availability_zone = az
+ return fake_spec
+
+ def _assert_unshelving_and_request_spec_az_and_host(
+ self,
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ ):
+ mock_get_by_instance_uuid.assert_called_once_with(context,
+ instance.uuid)
+
+ mock_unshelve.assert_called_once_with(context, instance, fake_spec)
+
+ self.assertEqual(instance.task_state, task_states.UNSHELVING)
+ self.assertEqual(fake_spec.availability_zone, fake_zone)
+ if fake_host:
+ self.assertEqual(fake_spec.requested_destination.host, fake_host)
+
def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False,
clean_shutdown=True):
- # Ensure instance can be shelved.
+
params = dict(task_state=None, vm_state=vm_state, display_name='vm01')
fake_instance = self._create_fake_instance_obj(params=params)
instance = fake_instance
@@ -988,12 +1052,14 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
return instance
+ @mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
- def test_unshelve(self, get_by_instance_uuid):
+ def test_unshelve(self, get_by_instance_uuid, fake_save):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(vm_states.SHELVED)
fake_spec = objects.RequestSpec()
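+ # The unshelve flow now reads the request spec AZ, so the fixture sets
+ # the field explicitly rather than leaving it unset.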
+ fake_spec.availability_zone = None
get_by_instance_uuid.return_value = fake_spec
with mock.patch.object(self.compute_api.compute_task_api,
'unshelve_instance') as unshelve:
@@ -1116,24 +1182,558 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
mock_get.assert_called_once_with(self.context, uuids.volume_id)
- @mock.patch.object(compute_api.API, '_validate_unshelve_az')
+# The following tests verify the behavior summarized in this matrix:
+# +----------+---------------------------+-------+----------------------------+
+# | Boot | Unshelve after offload AZ | Host | Result |
+# +==========+===========================+=======+============================+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+#
+# (1) Checked at the API level; the request is rejected with an error.
+#
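+# A minimal sketch of check (1), with illustrative names only (the helper
+# and its exact signature are assumptions, not necessarily what nova
+# implements):
+#
+#   def _ensure_host_in_az(context, host, az):
+#       host_az = availability_zones.get_host_availability_zone(
+#           context, host)
+#       if az is not None and host_az != az:
+#           raise exception.UnshelveHostNotInAZ(
+#               host=host, availability_zone=az)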
+#
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
@mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
- def test_specified_az_unshelve(self, get_by_instance_uuid,
- mock_save, mock_validate_unshelve_az):
- # Ensure instance can be unshelved.
+ def test_unshelve_without_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
instance = self._get_specify_state_instance(
vm_states.SHELVED_OFFLOADED)
- new_az = "west_az"
- fake_spec = objects.RequestSpec()
- fake_spec.availability_zone = "fake-old-az"
- get_by_instance_uuid.return_value = fake_spec
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, host=fake_host)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, new_az=fake_zone)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz_and_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz_and_host_invalid(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ new_az='avail_zone1',
+ host='fake_mini'
+ )
+
+ self.assertIn(
+ 'Host "fake_mini" is not in the availability zone "avail_zone1".',
+ exc.message
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_unpin_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, new_az=None)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_host_in_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
- self.compute_api.unshelve(self.context, instance, new_az=new_az)
+ self.compute_api.unshelve(context, instance, host=fake_host)
- mock_save.assert_called_once_with()
- self.assertEqual(new_az, fake_spec.availability_zone)
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_invalid_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ host='fake_mini'
+ )
+
+ self.assertIn(
+ 'Host "fake_mini" is not in the availability zone "avail_zone1".',
+ exc.message
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_host_unpin_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
- mock_validate_unshelve_az.assert_called_once_with(
- self.context, instance, new_az)
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=None, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz_and_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz_and_invalid_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ new_az=fake_zone,
+ host='fake_mini'
+ )
+
+ self.assertIn(
+ 'Host "fake_mini" is not in the availability zone "avail_zone1".',
+ exc.message
+ )
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index 6c3cbc1b57..dd10ecd7df 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -19,8 +19,8 @@
import copy
import datetime
import string
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -1558,47 +1558,86 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
def test_no_pci_request(self):
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, [], provider_mapping)
- def test_pci_request_from_flavor(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=None)]
+ def test_pci_request_from_flavor_no_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
+ self.context, mock.sentinel.report_client, pci_requests,
+ provider_mapping)
+
+ self.assertNotIn('rp_uuids', req.spec[0])
+
+ def test_pci_request_from_flavor_with_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
+ provider_mapping = {
+ f"{uuids.req1}-0": [uuids.rp1],
+ f"{uuids.req1}-1": [uuids.rp2],
+ }
+
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
+ self.assertEqual(
+ {uuids.rp1, uuids.rp2}, set(req.spec[0]["rp_uuids"].split(','))
+ )
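+
+ # i.e. flavor-based requests are mapped per device via suffixed keys
+ # ('<request_id>-0', '<request_id>-1', ...) and the allocated provider
+ # UUIDs land comma-joined in spec[0]['rp_uuids'].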
+
def test_pci_request_has_no_mapping(self):
pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_pci_request_ambiguous_mapping(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1, uuids.rp2]}
self.assertRaises(
exception.AmbiguousResourceProviderForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_unexpected_provider_name(self):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = 'unexpected'
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}])]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
+
provider_mapping = {uuids.port_1: [uuids.rp1]}
self.assertRaises(
exception.UnexpectedResourceProviderNameForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, report_client, pci_requests,
provider_mapping)
@@ -1610,11 +1649,14 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = (
'host:agent:enp0s31f6')
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}],)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1]}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, report_client, pci_requests, provider_mapping)
report_client.get_resource_provider_name.assert_called_once_with(
diff --git a/nova/tests/unit/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
index 0012a684f7..71c9097525 100644
--- a/nova/tests/unit/compute/test_virtapi.py
+++ b/nova/tests/unit/compute/test_virtapi.py
@@ -13,8 +13,9 @@
# under the License.
import collections
+from unittest import mock
-import mock
+import eventlet.timeout
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
@@ -186,16 +187,159 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
do_test()
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_timeout(self):
+ instance = mock.Mock()
+ instance.vm_state = mock.sentinel.vm_state
+ instance.task_state = mock.sentinel.task_state
+
+ mock_log = mock.Mock()
+
+ @mock.patch.object(compute_manager, 'LOG', new=mock_log)
@mock.patch.object(self.virtapi._compute, '_event_waiter',
- side_effect=test.TestingException())
- @mock.patch('eventlet.timeout.Timeout')
- def do_test(mock_timeout, mock_waiter):
- with self.virtapi.wait_for_instance_event('instance',
- [('foo', 'bar')]):
+ side_effect=eventlet.timeout.Timeout())
+ def do_test(mock_waiter):
+ with self.virtapi.wait_for_instance_event(
+ instance, [('foo', 'bar')]):
pass
- self.assertRaises(test.TestingException, do_test)
+ self.assertRaises(eventlet.timeout.Timeout, do_test)
+ mock_log.warning.assert_called_once_with(
+ 'Timeout waiting for %(events)s for instance with vm_state '
+ '%(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events': ['foo-bar'],
+ 'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state,
+ 'event_states':
+ 'foo-bar: timed out after 1.23 seconds',
+ },
+ instance=instance
+ )
+
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
+ def test_wait_for_instance_event_one_received_one_timed_out(self):
+ instance = mock.Mock()
+ instance.vm_state = mock.sentinel.vm_state
+ instance.task_state = mock.sentinel.task_state
+
+ mock_log = mock.Mock()
+
+ calls = []
+
+ def fake_event_waiter(*args, **kwargs):
+ calls.append((args, kwargs))
+ if len(calls) == 1:
+ event = mock.Mock(status="completed")
+ return event
+ else:
+ raise eventlet.timeout.Timeout()
+
+ @mock.patch.object(compute_manager, 'LOG', new=mock_log)
+ @mock.patch.object(self.virtapi._compute, '_event_waiter',
+ side_effect=fake_event_waiter)
+ def do_test(mock_waiter):
+ with self.virtapi.wait_for_instance_event(
+ instance, [('foo', 'bar'), ('missing', 'event')]):
+ pass
+
+ self.assertRaises(eventlet.timeout.Timeout, do_test)
+ mock_log.warning.assert_called_once_with(
+ 'Timeout waiting for %(events)s for instance with vm_state '
+ '%(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events': ['foo-bar', 'missing-event'],
+ 'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state,
+ 'event_states':
+ 'foo-bar: received after waiting 1.23 seconds, '
+ 'missing-event: timed out after 1.23 seconds',
+ },
+ instance=instance
+ )
+
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
+ def test_wait_for_instance_event_multiple_events(self):
+ instance = mock.Mock()
+ instance.vm_state = mock.sentinel.vm_state
+ instance.task_state = mock.sentinel.task_state
+
+ mock_log = mock.Mock()
+
+ calls = []
+
+ def fake_event_waiter(*args, **kwargs):
+ calls.append((args, kwargs))
+ if len(calls) == 1:
+ event = mock.Mock(status="completed")
+ return event
+ else:
+ raise eventlet.timeout.Timeout()
+
+ def fake_prepare_for_instance_event(instance, name, tag):
+ m = mock.MagicMock()
+ m.instance = instance
+ m.name = name
+ m.tag = tag
+ m.event_name = '%s-%s' % (name, tag)
+ m.wait.side_effect = fake_event_waiter
+ if name == 'received-but-not-waited':
+ m.ready.return_value = True
+ if name == 'missing-but-not-waited':
+ m.ready.return_value = False
+ return m
+
+ self.virtapi._compute.instance_events.prepare_for_instance_event.\
+ side_effect = fake_prepare_for_instance_event
+
+ @mock.patch.object(compute_manager, 'LOG', new=mock_log)
+ def do_test():
+ with self.virtapi.wait_for_instance_event(
+ instance,
+ [
+ ('received', 'event'),
+ ('early', 'event'),
+ ('missing', 'event'),
+ ('received-but-not-waited', 'event'),
+ ('missing-but-not-waited', 'event'),
+ ]
+ ):
+ self.virtapi.exit_wait_early([('early', 'event')])
+
+ self.assertRaises(eventlet.timeout.Timeout, do_test)
+ mock_log.warning.assert_called_once_with(
+ 'Timeout waiting for %(events)s for instance with vm_state '
+ '%(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events':
+ [
+ 'received-event',
+ 'early-event',
+ 'missing-event',
+ 'received-but-not-waited-event',
+ 'missing-but-not-waited-event'
+ ],
+ 'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state,
+ 'event_states':
+ 'received-event: received after waiting 1.23 seconds, '
+ 'early-event: received early, '
+ 'missing-event: timed out after 1.23 seconds, '
+ 'received-but-not-waited-event: received but not '
+ 'processed, '
+ 'missing-but-not-waited-event: expected but not received'
+ },
+ instance=instance
+ )
def test_wait_for_instance_event_exit_early(self):
# Wait for two events, exit early skipping one.
diff --git a/nova/tests/unit/conductor/tasks/test_base.py b/nova/tests/unit/conductor/tasks/test_base.py
index a7151c4cd0..cf9e8f9cfd 100644
--- a/nova/tests/unit/conductor/tasks/test_base.py
+++ b/nova/tests/unit/conductor/tasks/test_base.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.conductor.tasks import base
from nova import test
diff --git a/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py b/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
index 127d763477..c4b6c217b6 100644
--- a/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_messaging import exceptions as messaging_exceptions
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -439,7 +439,7 @@ class CrossCellMigrationTaskTestCase(test.NoDBTestCase):
what we need.
"""
with mock.patch.object(
- self.task.network_api, 'supports_port_binding_extension',
+ self.task.network_api, 'has_port_binding_extension',
return_value=True) as mock_neutron_check:
self.task._perform_external_api_checks()
mock_neutron_check.assert_called_once_with(self.task.context)
@@ -447,7 +447,7 @@ class CrossCellMigrationTaskTestCase(test.NoDBTestCase):
def test_perform_external_api_checks_old_neutron(self):
"""Tests the case that neutron API is old."""
with mock.patch.object(
- self.task.network_api, 'supports_port_binding_extension',
+ self.task.network_api, 'has_port_binding_extension',
return_value=False):
ex = self.assertRaises(exception.MigrationPreCheckError,
self.task._perform_external_api_checks)
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index 88f00d0d84..4e888139f6 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -345,6 +346,36 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
mock.call(self.destination)],
mock_get_info.call_args_list)
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_raise_ex(self, mock_get_info):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=False)
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.task._check_compatible_with_source_hypervisor,
+ self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_do_not_raise_ex(
+ self, mock_get_info
+ ):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=True)
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.task._check_compatible_with_source_hypervisor(self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
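+ # Illustrative sketch of the toggled check (approximate names, not
+ # necessarily nova's exact code):
+ #
+ #   if not CONF.workarounds.skip_hypervisor_version_check_on_lm:
+ #       if source_info.hypervisor_version > dest_info.hypervisor_version:
+ #           raise exception.DestinationHypervisorTooOld(...)
+ #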
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check):
@@ -353,7 +384,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
with test.nested(
mock.patch.object(self.task.network_api,
- 'supports_port_binding_extension',
+ 'has_port_binding_extension',
return_value=False),
mock.patch.object(self.task, '_check_can_migrate_pci')):
self.assertIsNone(self.task._check_requested_destination())
@@ -387,7 +418,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
with test.nested(
mock.patch.object(self.task.network_api,
- 'supports_port_binding_extension',
+ 'has_port_binding_extension',
return_value=False),
mock.patch.object(self.task, '_check_can_migrate_pci')):
ex = self.assertRaises(exception.MigrationPreCheckError,
@@ -730,7 +761,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_call_livem_checks_on_host')
@@ -813,7 +844,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
"""
@mock.patch.object(self.task.network_api,
- 'supports_port_binding_extension')
+ 'has_port_binding_extension')
@mock.patch.object(live_migrate,
'supports_vif_related_pci_allocations')
def _test(instance_pci_reqs,
diff --git a/nova/tests/unit/conductor/tasks/test_migrate.py b/nova/tests/unit/conductor/tasks/test_migrate.py
index 145e54f884..46cb033c5c 100644
--- a/nova/tests/unit/conductor/tasks/test_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_migrate.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 9445db1b62..971570dfb5 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -16,9 +16,12 @@
"""Tests for the conductor service."""
import copy
+from unittest import mock
+
+import ddt
-import mock
+from keystoneauth1 import exceptions as ks_exc
from oslo_db import exception as db_exc
+from oslo_limit import exception as limit_exceptions
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -44,12 +47,14 @@ from nova.db.api import models as api_models
from nova.db.main import api as main_db_api
from nova import exception as exc
from nova.image import glance as image_api
+from nova.limit import placement as placement_limit
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import request_spec
from nova.scheduler.client import query
+from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
@@ -383,7 +388,9 @@ class _BaseTaskTestCase(object):
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host',
- 'request_spec': None}
+ 'request_spec': None,
+ 'reimage_boot_volume': False,
+ 'target_state': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -2261,6 +2268,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
rs.instance_group = None
rs.retry = None
rs.limits = None
+ rs.is_bfv = False
rs.create()
params['request_specs'] = [rs]
params['image'] = {'fake_data': 'should_pass_silently'}
@@ -2399,7 +2407,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'1', None, None, dp_name)
arq_uuid = arq_in_list[0]['uuid']
- # muliti device request
+ # multi device request
mock_create.return_value = [arq_in_list[0], arq_in_list[0]]
rp_map = {"request_group_0" + str(port_id): rp_uuid}
request_tuples = [('123', '1.2.3.4', port_id,
@@ -2871,6 +2879,74 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'image'):
self.assertIn(key, request_spec_dict)
+ @mock.patch.object(placement_limit, 'enforce_num_instances_and_flavor')
+ @mock.patch('nova.compute.utils.notify_about_compute_task_error')
+ @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
+ def test_schedule_and_build_over_quota_during_recheck_ul(self, mock_select,
+ mock_notify,
+ mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver",
+ cores=1,
+ group="quota")
+ mock_select.return_value = [[fake_selection1]]
+ # Simulate a race where the first check passes and the recheck fails.
+ # First check occurs in compute/api.
+ project_id = self.params['context'].project_id
+ mock_enforce.side_effect = limit_exceptions.ProjectOverLimit(
+ project_id, [limit_exceptions.OverLimitInfo('cores', 2, 3, 0)])
+
+ original_save = objects.Instance.save
+
+ def fake_save(inst, *args, **kwargs):
+ # Make sure the context is targeted to the cell that the instance
+ # was created in.
+ self.assertIsNotNone(
+ inst._context.db_connection, 'Context is not targeted')
+ original_save(inst, *args, **kwargs)
+
+ self.stub_out('nova.objects.Instance.save', fake_save)
+
+ # This is needed to register the compute node in a cell.
+ self.start_service('compute', host='host1')
+ self.assertRaises(
+ limit_exceptions.ProjectOverLimit,
+ self.conductor.schedule_and_build_instances, **self.params)
+
+ mock_enforce.assert_called_once_with(
+ self.params['context'], project_id, mock.ANY, False, 0, 0)
+
+ # Verify we set the instance to ERROR state and set the fault message.
+ instances = objects.InstanceList.get_all(self.ctxt)
+ self.assertEqual(1, len(instances))
+ instance = instances[0]
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertIn('ProjectOverLimit', instance.fault.message)
+ # Verify we removed the build objects.
+ build_requests = objects.BuildRequestList.get_all(self.ctxt)
+ # Verify that the instance is mapped to a cell
+ inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
+ self.ctxt, instance.uuid)
+ self.assertIsNotNone(inst_mapping.cell_mapping)
+
+ self.assertEqual(0, len(build_requests))
+
+ @api_db_api.context_manager.reader
+ def request_spec_get_all(context):
+ return context.session.query(api_models.RequestSpec).all()
+
+ request_specs = request_spec_get_all(self.ctxt)
+ self.assertEqual(0, len(request_specs))
+
+ mock_notify.assert_called_once_with(
+ test.MatchType(context.RequestContext), 'build_instances',
+ instance.uuid, test.MatchType(dict), 'error',
+ test.MatchType(limit_exceptions.ProjectOverLimit))
+ request_spec_dict = mock_notify.call_args_list[0][0][3]
+ for key in ('instance_type', 'num_instances', 'instance_properties',
+ 'image'):
+ self.assertIn(key, request_spec_dict)
+
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance')
@mock.patch('nova.objects.quotas.Quotas.check_deltas')
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
@@ -4676,6 +4752,68 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_evacuate_old_rpc_with_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': 'stopped'})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version', return_value=False):
+ self.assertRaises(exc.UnsupportedRPCVersion,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj, **rebuild_args)
+
+ def test_evacuate_old_rpc_without_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': None})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ can_send_version.assert_has_calls([
+ mock.call('1.25'), mock.call('1.24'),
+ mock.call('1.12')])
+
+ def test_rebuild_instance_volume_backed(self):
+ inst_obj = self._create_fake_instance_obj()
+ version = '1.25'
+ cctxt_mock = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+
+ @mock.patch.object(self.conductor.client, 'prepare',
+ return_value=cctxt_mock)
+ @mock.patch.object(self.conductor.client, 'can_send_version',
+ return_value=True)
+ def _test(mock_can_send_ver, prepare_mock):
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ prepare_mock.assert_called_once_with(version=version)
+ kw = {'instance': inst_obj, **rebuild_args}
+ cctxt_mock.cast.assert_called_once_with(
+ self.context, 'rebuild_instance', **kw)
+ _test()
+
+ def test_rebuild_instance_volume_backed_old_service(self):
+ """Tests rebuild_instance_volume_backed when the service is too old"""
+ inst_obj = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.assertRaises(exc.NovaException,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj,
+ **rebuild_args)
+ can_send_version.assert_has_calls([mock.call('1.25'),
+ mock.call('1.24')])
+
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
@@ -4798,3 +4936,35 @@ class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
logtext)
self.assertIn('host3\' because it is not up', logtext)
self.assertIn('image1 failed 1 times', logtext)
+
+
+@ddt.ddt
+class TestConductorTaskManager(test.NoDBTestCase):
+ def test_placement_client_startup(self):
+ self.assertIsNone(report.PLACEMENTCLIENT)
+ conductor_manager.ComputeTaskManager()
+ self.assertIsNotNone(report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ test.TestingException)
+ def test_placement_client_startup_fatals(self, exc):
+ self.assertRaises(exc,
+ self._test_placement_client_startup_exception, exc)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure)
+ def test_placement_client_startup_non_fatal(self, exc):
+ self._test_placement_client_startup_exception(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_placement_client_startup_exception(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ try:
+ conductor_manager.ComputeTaskManager()
+ finally:
+ mock_log.error.assert_called_once()
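+
+ # Illustrative summary of the behavior exercised above (a sketch; the
+ # exact call site is an assumption): conductor startup pre-creates the
+ # placement client, tolerating transient keystone failures so it can
+ # retry later, while auth/config errors stay fatal:
+ #
+ #   try:
+ #       report.report_client_singleton()
+ #   except (ks_exc.EndpointNotFound, ks_exc.DiscoveryFailure,
+ #           ks_exc.RequestTimeout, ks_exc.GatewayTimeout,
+ #           ks_exc.ConnectFailure):
+ #       LOG.error('Placement is unreachable at startup; will retry')
+ #   # ks_exc.MissingAuthPlugin and ks_exc.Unauthorized propagate.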
diff --git a/nova/tests/unit/console/rfb/test_auth.py b/nova/tests/unit/console/rfb/test_auth.py
index c4026b6637..1d66b2684f 100644
--- a/nova/tests/unit/console/rfb/test_auth.py
+++ b/nova/tests/unit/console/rfb/test_auth.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/rfb/test_authnone.py b/nova/tests/unit/console/rfb/test_authnone.py
index e628106e3b..3ca44dce89 100644
--- a/nova/tests/unit/console/rfb/test_authnone.py
+++ b/nova/tests/unit/console/rfb/test_authnone.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/rfb/test_authvencrypt.py b/nova/tests/unit/console/rfb/test_authvencrypt.py
index f7fc31939e..de9bccb44a 100644
--- a/nova/tests/unit/console/rfb/test_authvencrypt.py
+++ b/nova/tests/unit/console/rfb/test_authvencrypt.py
@@ -14,8 +14,8 @@
import ssl
import struct
+from unittest import mock
-import mock
from nova.console.rfb import auth
from nova.console.rfb import authvencrypt
diff --git a/nova/tests/unit/console/securityproxy/test_rfb.py b/nova/tests/unit/console/securityproxy/test_rfb.py
index 3eb8ba6acf..17cf8f7c57 100644
--- a/nova/tests/unit/console/securityproxy/test_rfb.py
+++ b/nova/tests/unit/console/securityproxy/test_rfb.py
@@ -15,7 +15,7 @@
"""Tests the Console Security Proxy Framework."""
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/test_serial.py b/nova/tests/unit/console/test_serial.py
index bc87ca6ca2..44d88e6e83 100644
--- a/nova/tests/unit/console/test_serial.py
+++ b/nova/tests/unit/console/test_serial.py
@@ -15,8 +15,7 @@
"""Tests for Serial Console."""
import socket
-
-import mock
+from unittest import mock
from nova.console import serial
from nova import exception
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index e05ae520d9..639623bbb5 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -17,8 +17,8 @@
import copy
import io
import socket
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
@@ -302,8 +302,6 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
- 'host': 'node1',
- 'port': '10000',
'internal_access_path': 'xxx',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
@@ -589,12 +587,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
@@ -619,41 +617,34 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+ # which will cause a 301 Moved Permanently error to be returned
+ # instead that redirects to a sanitized version of the URL with extra
+ # leading '/' characters removed.
+ # See https://github.com/python/cpython/issues/87389 for details.
+ # We will consider either response to be valid for this test. This will
+ # also help if and when the above fix gets backported to older versions
+ # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ if location.startswith('Location: '):
+ location = location[len('Location: '):]
+ location = location.rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
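+
+ # e.g. with the cpython fix, 'GET //example.com/%2F..' is answered with
+ # 'Location: /example.com/%2F..' (extra leading slashes collapsed).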
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data by calling the
- # request socket sendall() method.
- self.data = b''
-
- def fake_sendall(data):
- self.data += data
-
- mock_req.sendall.side_effect = fake_sendall
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.data = self.data.decode()
- self.assertIn('Error code: 400', self.data)
- self.assertIn('Message: URI must not start with //', self.data)
+ self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
diff --git a/nova/tests/unit/db/api/test_api.py b/nova/tests/unit/db/api/test_api.py
index 251407612f..6113791a8e 100644
--- a/nova/tests/unit/db/api/test_api.py
+++ b/nova/tests/unit/db/api/test_api.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.db.api import api as db_api
from nova import test
diff --git a/nova/tests/unit/db/api/test_migrations.py b/nova/tests/unit/db/api/test_migrations.py
index 1b14d569db..7c99f2f44a 100644
--- a/nova/tests/unit/db/api/test_migrations.py
+++ b/nova/tests/unit/db/api/test_migrations.py
@@ -21,10 +21,10 @@ test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
+from unittest import mock
+
from alembic import command as alembic_api
from alembic import script as alembic_script
-from migrate.versioning import api as migrate_api
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -126,47 +126,6 @@ class TestModelsSyncPostgreSQL(
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
- """Test that the models match the database after old migrations are run."""
-
- def db_sync(self, engine):
- # the 'nova.db.migration.db_sync' method will not use the legacy
- # sqlalchemy-migrate-based migration flow unless the database is
- # already controlled with sqlalchemy-migrate, so we need to manually
- # enable version controlling with this tool to test this code path
- repository = migration._find_migrate_repo(database='api')
- migrate_api.version_control(
- engine, repository, migration.MIGRATE_INIT_VERSION['api'])
-
- # now we can apply migrations as expected and the legacy path will be
- # followed
- super().db_sync(engine)
-
-
-class TestModelsLegacySyncSQLite(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- pass
-
-
-class TestModelsLegacySyncMySQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- FIXTURE = test_fixtures.MySQLOpportunisticFixture
-
-
-class TestModelsLegacySyncPostgreSQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-
-
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
@@ -179,7 +138,7 @@ class NovaMigrationsWalk(
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
- self.init_version = migration.ALEMBIC_INIT_VERSION['api']
+ self.init_version = 'd67eeaabee36'
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
diff --git a/nova/tests/unit/db/main/test_api.py b/nova/tests/unit/db/main/test_api.py
index c9a9e83154..98f9c854d9 100644
--- a/nova/tests/unit/db/main/test_api.py
+++ b/nova/tests/unit/db/main/test_api.py
@@ -18,10 +18,10 @@
import copy
import datetime
+from unittest import mock
from dateutil import parser as dateutil_parser
import iso8601
-import mock
import netaddr
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
@@ -167,27 +167,25 @@ class DbTestCase(test.TestCase):
class HelperTestCase(test.TestCase):
@mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper(self, mock_jl):
- query = db._joinedload_all('foo.bar.baz')
+ query = db._joinedload_all(
+ models.SecurityGroup, 'instances.info_cache'
+ )
# We call sqlalchemy.orm.joinedload() on the first element
- mock_jl.assert_called_once_with('foo')
+ mock_jl.assert_called_once_with(models.SecurityGroup.instances)
# Then first.joinedload(second)
column2 = mock_jl.return_value
- column2.joinedload.assert_called_once_with('bar')
-
- # Then second.joinedload(third)
- column3 = column2.joinedload.return_value
- column3.joinedload.assert_called_once_with('baz')
+ column2.joinedload.assert_called_once_with(models.Instance.info_cache)
- self.assertEqual(column3.joinedload.return_value, query)
+ self.assertEqual(column2.joinedload.return_value, query)
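+
+ # i.e. the helper is expected to produce the SQLAlchemy 2.0-style chain,
+ # roughly: orm.joinedload(models.SecurityGroup.instances)
+ #          .joinedload(models.Instance.info_cache)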
@mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper_single(self, mock_jl):
- query = db._joinedload_all('foo')
+ query = db._joinedload_all(models.SecurityGroup, 'instances')
# We call sqlalchemy.orm.joinedload() on the first element
- mock_jl.assert_called_once_with('foo')
+ mock_jl.assert_called_once_with(models.SecurityGroup.instances)
# We should have gotten back just the result of the joinedload()
# call if there were no other elements
@@ -279,33 +277,21 @@ class DecoratorTestCase(test.TestCase):
'No DB access allowed in ',
mock_log.error.call_args[0][0])
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_writer_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_writer_disable_db_access(self):
@db.pick_context_manager_writer
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_disable_db_access(self):
@db.pick_context_manager_reader
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_allow_async_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_allow_async_disable_db_access(self):
@db.pick_context_manager_reader_allow_async
def func(context, value):
pass
@@ -1683,28 +1669,40 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
self.assertEqual([], instances)
- @mock.patch('sqlalchemy.orm.undefer')
@mock.patch('sqlalchemy.orm.joinedload')
- def test_instance_get_all_by_filters_extra_columns(self,
- mock_joinedload,
- mock_undefer):
+ def test_instance_get_all_by_filters_extra_columns(self, mock_joinedload):
db.instance_get_all_by_filters_sort(
self.ctxt, {},
- columns_to_join=['info_cache', 'extra.pci_requests'])
- mock_joinedload.assert_called_once_with('info_cache')
- mock_undefer.assert_called_once_with('extra.pci_requests')
+ columns_to_join=['info_cache', 'extra.pci_requests'],
+ )
+ mock_joinedload.assert_has_calls(
+ [
+ mock.call(models.Instance.info_cache),
+ mock.ANY,
+ mock.call(models.Instance.extra),
+ mock.ANY,
+ mock.ANY,
+ ]
+ )
- @mock.patch('sqlalchemy.orm.undefer')
@mock.patch('sqlalchemy.orm.joinedload')
- def test_instance_get_active_by_window_extra_columns(self,
- mock_joinedload,
- mock_undefer):
+ def test_instance_get_active_by_window_extra_columns(
+ self, mock_joinedload,
+ ):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
db.instance_get_active_by_window_joined(
self.ctxt, now,
- columns_to_join=['info_cache', 'extra.pci_requests'])
- mock_joinedload.assert_called_once_with('info_cache')
- mock_undefer.assert_called_once_with('extra.pci_requests')
+ columns_to_join=['info_cache', 'extra.pci_requests'],
+ )
+ mock_joinedload.assert_has_calls(
+ [
+ mock.call(models.Instance.info_cache),
+ mock.ANY,
+ mock.call(models.Instance.extra),
+ mock.ANY,
+ mock.ANY,
+ ]
+ )
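
# Hedged note on the mock.ANY placeholders above: with attribute-based
# options, loading 'extra.pci_requests' presumably chains undefer() off
# the joinedload(), producing intermediate option calls whose arguments
# the tests no longer pin down. Illustrative shape only:
from sqlalchemy import orm
from nova.db.main import models

option = orm.joinedload(models.Instance.extra).undefer(
    models.InstanceExtra.pci_requests)
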
def test_instance_get_all_by_filters_with_meta(self):
self.create_instance_with_args()
@@ -3349,7 +3347,7 @@ class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_are_in_order(self):
- """Ensure retrived actions are in order."""
+ """Ensure retrieved actions are in order."""
uuid1 = uuidsentinel.uuid1
extra = {
@@ -3608,7 +3606,7 @@ class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertActionEventSaved(event, action['id'])
def test_instance_action_events_get_are_in_order(self):
- """Ensure retrived action events are in order."""
+ """Ensure retrieved action events are in order."""
uuid1 = uuidsentinel.uuid1
action = db.action_start(self.ctxt,
@@ -5653,7 +5651,6 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
super(ArchiveTestCase, self).setUp()
self.engine = db.get_engine()
self.metadata = sa.MetaData()
- self.conn = self.engine.connect()
self.instance_id_mappings = models.InstanceIdMapping.__table__
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "shadow_instance_id_mappings")
@@ -5681,17 +5678,18 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Ensure shadow tables are empty
This method ensures that all the shadow tables in the schema,
- except for specificially named exceptions, are empty. This
+ except for specifically named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content.
"""
metadata = sa.MetaData()
metadata.reflect(bind=self.engine)
- for table in metadata.tables:
- if table.startswith("shadow_") and table not in exceptions:
- rows = self.conn.exec_driver_sql(
- "SELECT * FROM %s" % table
- ).fetchall()
- self.assertEqual(rows, [], "Table %s not empty" % table)
+ with self.engine.connect() as conn, conn.begin():
+ for table in metadata.tables:
+ if table.startswith("shadow_") and table not in exceptions:
+ rows = conn.exec_driver_sql(
+ "SELECT * FROM %s" % table
+ ).fetchall()
+ self.assertEqual(rows, [], "Table %s not empty" % table)
def test_shadow_tables(self):
"""Validate shadow table schema.
@@ -5744,57 +5742,72 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assert_shadow_tables_empty_except()
def test_archive_deleted_rows(self):
- # Add 6 rows to table
- for uuidstr in self.uuidstrs:
- ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ # Add 6 rows to table
+ for uuidstr in self.uuidstrs:
+ ins_stmt = self.instance_id_mappings.insert().values(
+ uuid=uuidstr,
+ )
+ conn.execute(ins_stmt)
+
# Set 4 to deleted
- update_statement = self.instance_id_mappings.update().\
- where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ with self.engine.connect() as conn, conn.begin():
+ update_statement = self.instance_id_mappings.update().where(
+ self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4])
+ ).values(deleted=1, deleted_at=timeutils.utcnow())
+ conn.execute(update_statement)
+
qiim = sql.select(self.instance_id_mappings).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 6 in main
- self.assertEqual(len(rows), 6)
qsiim = sql.select(self.shadow_instance_id_mappings).where(
self.shadow_instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(rows), 0)
- # Archive 2 rows
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
+
results = db.archive_deleted_rows(max_rows=2)
expected = dict(instance_id_mappings=2)
self._assertEqualObjects(expected, results[0])
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 4 left in main
- self.assertEqual(len(rows), 4)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 2 in shadow
- self.assertEqual(len(rows), 2)
+
+ with self.engine.connect() as conn, conn.begin():
+            # We archived 2 rows above; verify we have 4 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 2)
# Archive 2 more rows
results = db.archive_deleted_rows(max_rows=2)
expected = dict(instance_id_mappings=2)
self._assertEqualObjects(expected, results[0])
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 2 left in main
- self.assertEqual(len(rows), 2)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 4 in shadow
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 4)
+
# Try to archive more, but there are no deleted rows left.
results = db.archive_deleted_rows(max_rows=2)
expected = dict()
self._assertEqualObjects(expected, results[0])
- rows = self.conn.execute(qiim).fetchall()
- # Verify we still have 2 left in main
- self.assertEqual(len(rows), 2)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we still have 4 in shadow
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we still have 2 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we still have 4 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 4)
# Ensure only deleted rows were deleted
self._assert_shadow_tables_empty_except(
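
# Hedged recap of the API shape these assertions depend on; 'db' is the
# module already imported by this test file, and the helper name is
# illustrative only.
def _archive_and_count(max_rows):
    # archive_deleted_rows returns a 3-tuple (per-table counts dict,
    # archived instance uuids, total rows archived); the tests index
    # results[0] for the counts, e.g. {'instance_id_mappings': 2}
    results, archived_uuids, total = db.archive_deleted_rows(
        max_rows=max_rows)
    return results
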
@@ -5804,34 +5817,45 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instances.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
- ins_stmt = self.instance_actions.insert().\
- values(instance_uuid=uuidstr)
- result = self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
+ ins_stmt = self.instance_actions.insert().values(
+ instance_uuid=uuidstr,
+ )
+ with self.engine.connect() as conn, conn.begin():
+ result = conn.execute(ins_stmt)
+
instance_action_uuid = result.inserted_primary_key[0]
- ins_stmt = self.instance_actions_events.insert().\
- values(action_id=instance_action_uuid)
- self.conn.execute(ins_stmt)
+ ins_stmt = self.instance_actions_events.insert().values(
+ action_id=instance_action_uuid,
+ )
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
# Set 1 to deleted before 2017-01-01
deleted_at = timeutils.parse_strtime('2017-01-01T00:00:00.0')
- update_statement = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[0:1]))\
- .values(deleted=1, deleted_at=deleted_at)
- self.conn.execute(update_statement)
+ update_statement = self.instances.update().where(
+ self.instances.c.uuid.in_(self.uuidstrs[0:1])
+ ).values(deleted=1, deleted_at=deleted_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
# Set 1 to deleted before 2017-01-02
deleted_at = timeutils.parse_strtime('2017-01-02T00:00:00.0')
- update_statement = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[1:2]))\
- .values(deleted=1, deleted_at=deleted_at)
- self.conn.execute(update_statement)
+ update_statement = self.instances.update().where(
+ self.instances.c.uuid.in_(self.uuidstrs[1:2])
+ ).values(deleted=1, deleted_at=deleted_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
# Set 2 to deleted now
- update_statement = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[2:4]))\
- .values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ update_statement = self.instances.update().where(
+ self.instances.c.uuid.in_(self.uuidstrs[2:4])
+ ).values(deleted=1, deleted_at=timeutils.utcnow())
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qiim = sql.select(self.instances).where(
self.instances.c.uuid.in_(self.uuidstrs)
)
@@ -5839,9 +5863,11 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.shadow_instances.c.uuid.in_(self.uuidstrs)
)
- # Verify we have 6 in main
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 6)
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+
# Make sure 'before' comparison is for < not <=, nothing deleted
before_date = dateutil_parser.parse('2017-01-01', fuzzy=True)
_, uuids, _ = db.archive_deleted_rows(max_rows=1, before=before_date)
@@ -5875,22 +5901,25 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
expected = {}
self._assertEqualObjects(expected, results[0])
- # Verify we have 4 left in main
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 4)
- # Verify we have 2 in shadow
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 2)
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 2)
# Archive everything else, make sure default operation without
# before argument didn't break
results = db.archive_deleted_rows(max_rows=1000)
- # Verify we have 2 left in main
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 4)
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
@@ -5918,94 +5947,117 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
except (db_exc.DBError, sqla_exc.OperationalError):
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
- update_statement = main_table.update().\
- where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ update_statement = main_table.update().where(
+ main_table.c.uuid.in_(self.uuidstrs[:4])
+ ).values(deleted=1, deleted_at=timeutils.utcnow())
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qmt = sql.select(main_table).where(
main_table.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qmt).fetchall()
- # Verify we have 6 in main
- self.assertEqual(len(rows), 6)
qst = sql.select(shadow_table).where(
shadow_table.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qst).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 0)
+
# Archive 2 rows
db._archive_deleted_rows_for_table(
self.metadata, self.engine, tablename, max_rows=2, before=None,
task_log=False,
)
- # Verify we have 4 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 4)
- # Verify we have 2 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 2)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 left in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 2)
+
# Archive 2 more rows
db._archive_deleted_rows_for_table(
self.metadata, self.engine, tablename, max_rows=2, before=None,
task_log=False,
)
- # Verify we have 2 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 4)
+
# Try to archive more, but there are no deleted rows left.
db._archive_deleted_rows_for_table(
self.metadata, self.engine, tablename, max_rows=2, before=None,
task_log=False,
)
- # Verify we still have 2 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we still have 4 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we still have 2 left in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we still have 4 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 4)
+
return 0
def test_archive_deleted_rows_shadow_insertions_equals_deletions(self):
# Add 2 rows to table
for uuidstr in self.uuidstrs[:2]:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
# Set both to deleted
- update_statement = self.instance_id_mappings.update().\
- where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:2]))\
- .values(deleted=1)
- self.conn.execute(update_statement)
+ update_statement = self.instance_id_mappings.update().where(
+ self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:2])
+ ).values(deleted=1)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qiim = sql.select(self.instance_id_mappings).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:2])
)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 2 in main
- self.assertEqual(len(rows), 2)
-
qsiim = sql.select(self.shadow_instance_id_mappings).where(
self.shadow_instance_id_mappings.c.uuid.in_(self.uuidstrs[:2])
)
- shadow_rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(shadow_rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
# Archive the rows
db.archive_deleted_rows(max_rows=2)
- main_rows = self.conn.execute(qiim).fetchall()
- shadow_rows = self.conn.execute(qsiim).fetchall()
- # Verify the insertions into shadow is same as deletions from main
- self.assertEqual(len(shadow_rows), len(rows) - len(main_rows))
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we now have 0 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 0)
+ # Verify we now have 2 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 2)
def test_archive_deleted_rows_for_migrations(self):
# migrations.instance_uuid depends on instances.uuid
@@ -6015,13 +6067,18 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
instance_uuid = uuidsentinel.instance
ins_stmt = self.instances.insert().values(
- uuid=instance_uuid,
- deleted=1,
- deleted_at=timeutils.utcnow())
- self.conn.execute(ins_stmt)
- ins_stmt = self.migrations.insert().values(instance_uuid=instance_uuid,
- deleted=0)
- self.conn.execute(ins_stmt)
+ uuid=instance_uuid,
+ deleted=1,
+ deleted_at=timeutils.utcnow(),
+ )
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
+ ins_stmt = self.migrations.insert().values(
+ instance_uuid=instance_uuid, deleted=0,
+ )
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
# Archiving instances should result in migrations related to the
# instances also being archived.
num = db._archive_deleted_rows_for_table(
@@ -6037,70 +6094,86 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt2)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement2)
- # Verify we have 6 in each main table
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement2)
+
qiim = sql.select(self.instance_id_mappings).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 6)
qi = sql.select(self.instances).where(
self.instances.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(rows), 6)
- # Verify we have 0 in each shadow table
qsiim = sql.select(self.shadow_instance_id_mappings).where(
self.shadow_instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 0)
qsi = sql.select(self.shadow_instances).where(
self.shadow_instances.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in each main table
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+ rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in each shadow table
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
+ rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(rows), 0)
+
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(max_rows=7)
- # Verify we have 5 left in the two main tables combined
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 5)
- # Verify we have 7 in the two shadow tables combined.
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 7)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 5 left in the two main tables combined
+ iim_rows = conn.execute(qiim).fetchall()
+ i_rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 5)
+ # Verify we have 7 in the two shadow tables combined.
+ siim_rows = conn.execute(qsiim).fetchall()
+ si_rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(max_rows=1)
- # Verify we have 4 total left in both main tables.
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 4)
- # Verify we have 8 in shadow
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 total left in both main tables.
+ iim_rows = conn.execute(qiim).fetchall()
+ i_rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 4)
+ # Verify we have 8 in shadow
+ siim_rows = conn.execute(qsiim).fetchall()
+ si_rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(max_rows=500)
- # Verify we have 4 total left in both main tables.
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 4)
- # Verify we have 8 in shadow
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 total left in both main tables.
+ iim_rows = conn.execute(qiim).fetchall()
+ i_rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 4)
+ # Verify we have 8 in shadow
+ siim_rows = conn.execute(qsiim).fetchall()
+ si_rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
self._assert_shadow_tables_empty_except(
'shadow_instances',
'shadow_instance_id_mappings'
@@ -6112,34 +6185,47 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
ins_stmt = self.task_log.insert().values(
id=i, task_name='instance_usage_audit', state='DONE',
host='host', message='message')
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
# Set 1 to updated before 2017-01-01
updated_at = timeutils.parse_strtime('2017-01-01T00:00:00.0')
update_statement = self.task_log.update().where(
- self.task_log.c.id == 1).values(updated_at=updated_at)
- self.conn.execute(update_statement)
+ self.task_log.c.id == 1
+ ).values(updated_at=updated_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
# Set 1 to updated before 2017-01-02
updated_at = timeutils.parse_strtime('2017-01-02T00:00:00.0')
update_statement = self.task_log.update().where(
- self.task_log.c.id == 2).values(updated_at=updated_at)
- self.conn.execute(update_statement)
+ self.task_log.c.id == 2
+ ).values(updated_at=updated_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
# Set 2 to updated now
update_statement = self.task_log.update().where(
- self.task_log.c.id.in_(range(3, 5))).values(
- updated_at=timeutils.utcnow())
- self.conn.execute(update_statement)
- # Verify we have 6 in main
+ self.task_log.c.id.in_(range(3, 5))
+ ).values(updated_at=timeutils.utcnow())
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qtl = sql.select(self.task_log).where(
self.task_log.c.id.in_(range(1, 7))
)
- rows = self.conn.execute(qtl).fetchall()
- self.assertEqual(len(rows), 6)
- # Verify we have 0 in shadow
qstl = sql.select(self.shadow_task_log).where(
self.shadow_task_log.c.id.in_(range(1, 7))
)
- rows = self.conn.execute(qstl).fetchall()
- self.assertEqual(len(rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qtl).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qstl).fetchall()
+ self.assertEqual(len(rows), 0)
+
# Make sure 'before' comparison is for < not <=
before_date = dateutil_parser.parse('2017-01-01', fuzzy=True)
_, _, rows = db.archive_deleted_rows(
@@ -6161,22 +6247,27 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
results = db.archive_deleted_rows(max_rows=2, task_log=True)
expected = dict(task_log=2)
self._assertEqualObjects(expected, results[0])
- # Verify we have 2 left in main
- rows = self.conn.execute(qtl).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qstl).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qtl).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qstl).fetchall()
+ self.assertEqual(len(rows), 4)
+
# Archive the rest
results = db.archive_deleted_rows(max_rows=100, task_log=True)
expected = dict(task_log=2)
self._assertEqualObjects(expected, results[0])
- # Verify we have 0 left in main
- rows = self.conn.execute(qtl).fetchall()
- self.assertEqual(len(rows), 0)
- # Verify we have 6 in shadow
- rows = self.conn.execute(qstl).fetchall()
- self.assertEqual(len(rows), 6)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 0 left in main
+ rows = conn.execute(qtl).fetchall()
+ self.assertEqual(len(rows), 0)
+ # Verify we have 6 in shadow
+ rows = conn.execute(qstl).fetchall()
+ self.assertEqual(len(rows), 6)
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
diff --git a/nova/tests/unit/db/main/test_migrations.py b/nova/tests/unit/db/main/test_migrations.py
index f5ce3697b3..579888cfd2 100644
--- a/nova/tests/unit/db/main/test_migrations.py
+++ b/nova/tests/unit/db/main/test_migrations.py
@@ -25,11 +25,11 @@ test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
+from unittest import mock
+
from alembic import command as alembic_api
from alembic import script as alembic_script
import fixtures
-from migrate.versioning import api as migrate_api
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -173,47 +173,6 @@ class TestModelsSyncPostgreSQL(
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
- """Test that the models match the database after old migrations are run."""
-
- def db_sync(self, engine):
- # the 'nova.db.migration.db_sync' method will not use the legacy
- # sqlalchemy-migrate-based migration flow unless the database is
- # already controlled with sqlalchemy-migrate, so we need to manually
- # enable version controlling with this tool to test this code path
- repository = migration._find_migrate_repo(database='main')
- migrate_api.version_control(
- engine, repository, migration.MIGRATE_INIT_VERSION['main'])
-
- # now we can apply migrations as expected and the legacy path will be
- # followed
- super().db_sync(engine)
-
-
-class TestModelsLegacySyncSQLite(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- pass
-
-
-class TestModelsLegacySyncMySQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- FIXTURE = test_fixtures.MySQLOpportunisticFixture
-
-
-class TestModelsLegacySyncPostgreSQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-
-
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
@@ -226,7 +185,7 @@ class NovaMigrationsWalk(
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('main')
- self.init_version = migration.ALEMBIC_INIT_VERSION['main']
+ self.init_version = '8f2f1571d55b'
def assertIndexExists(self, connection, table_name, index):
self.assertTrue(
@@ -240,6 +199,12 @@ class NovaMigrationsWalk(
'Index %s on table %s should not exist' % (index, table_name),
)
+ def assertColumnExists(self, connection, table_name, column):
+ self.assertTrue(
+ oslodbutils.column_exists(connection, table_name, column),
+ 'Column %s on table %s should exist' % (column, table_name),
+ )
+
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
@@ -280,6 +245,42 @@ class NovaMigrationsWalk(
# no check for the MySQL-specific change
+ def _check_ccb0fa1a2252(self, connection):
+ for prefix in ('', 'shadow_'):
+ table_name = prefix + 'block_device_mapping'
+ table = oslodbutils.get_table(connection, table_name)
+
+ self.assertColumnExists(connection, table_name, 'encrypted')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_secret_uuid')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_format')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_options')
+
+ # Only check for the expected types if we're using sqlite because
+ # other databases' types may be different. For example, Boolean
+ # may be represented as an integer in MySQL
+ if connection.engine.name != 'sqlite':
+ return
+
+ self.assertIsInstance(table.c.encrypted.type, sa.types.Boolean)
+ self.assertIsInstance(
+ table.c.encryption_secret_uuid.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_format.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_options.type, sa.types.String)
+
+ def _check_960aac0e09ea(self, connection):
+ self.assertIndexNotExists(
+ connection, 'console_auth_tokens',
+ 'console_auth_tokens_token_hash_idx',
+ )
+ self.assertIndexNotExists(
+ connection, 'instances', 'uuid',
+ )
+
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
diff --git a/nova/tests/unit/db/test_migration.py b/nova/tests/unit/db/test_migration.py
index 6657bc48e0..17a099a8cc 100644
--- a/nova/tests/unit/db/test_migration.py
+++ b/nova/tests/unit/db/test_migration.py
@@ -12,14 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import glob
-import os
+from unittest import mock
import urllib
from alembic.runtime import migration as alembic_migration
-from migrate import exceptions as migrate_exceptions
-from migrate.versioning import api as migrate_api
-import mock
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
@@ -56,7 +52,7 @@ class TestDBURL(test.NoDBTestCase):
class TestDBSync(test.NoDBTestCase):
- def test_db_sync_invalid_databse(self):
+ def test_db_sync_invalid_database(self):
"""We only have two databases."""
self.assertRaises(
exception.Invalid, migration.db_sync, database='invalid')
@@ -68,17 +64,9 @@ class TestDBSync(test.NoDBTestCase):
migration.db_sync, '402')
@mock.patch.object(migration, '_upgrade_alembic')
- @mock.patch.object(migration, '_init_alembic_on_legacy_database')
- @mock.patch.object(migration, '_is_database_under_alembic_control')
- @mock.patch.object(migration, '_is_database_under_migrate_control')
@mock.patch.object(migration, '_find_alembic_conf')
- @mock.patch.object(migration, '_find_migrate_repo')
@mock.patch.object(migration, '_get_engine')
- def _test_db_sync(
- self, has_migrate, has_alembic, mock_get_engine, mock_find_repo,
- mock_find_conf, mock_is_migrate, mock_is_alembic, mock_init,
- mock_upgrade,
- ):
+ def test_db_sync(self, mock_get_engine, mock_find_conf, mock_upgrade):
# return an encoded URL to mimic sqlalchemy
mock_get_engine.return_value.url = (
@@ -86,13 +74,10 @@ class TestDBSync(test.NoDBTestCase):
'read_default_file=%2Fetc%2Fmy.cnf.d%2Fnova.cnf'
'&read_default_group=nova'
)
- mock_is_migrate.return_value = has_migrate
- mock_is_alembic.return_value = has_alembic
migration.db_sync()
mock_get_engine.assert_called_once_with('main', context=None)
- mock_find_repo.assert_called_once_with('main')
mock_find_conf.assert_called_once_with('main')
mock_find_conf.return_value.set_main_option.assert_called_once_with(
'sqlalchemy.url',
@@ -100,93 +85,25 @@ class TestDBSync(test.NoDBTestCase):
'read_default_file=%%2Fetc%%2Fmy.cnf.d%%2Fnova.cnf' # ...
'&read_default_group=nova'
)
- mock_is_migrate.assert_called_once_with(
- mock_get_engine.return_value, mock_find_repo.return_value)
-
- if has_migrate:
- mock_is_alembic.assert_called_once_with(
- mock_get_engine.return_value)
- else:
- mock_is_alembic.assert_not_called()
-
- # we should only attempt the upgrade of the remaining
- # sqlalchemy-migrate-based migrations and fake apply of the initial
- # alembic migrations if sqlalchemy-migrate is in place but alembic
- # hasn't been used yet
- if has_migrate and not has_alembic:
- mock_init.assert_called_once_with(
- mock_get_engine.return_value, 'main',
- mock_find_repo.return_value, mock_find_conf.return_value)
- else:
- mock_init.assert_not_called()
- # however, we should always attempt to upgrade the requested migration
- # to alembic
mock_upgrade.assert_called_once_with(
- mock_get_engine.return_value, mock_find_conf.return_value, None)
-
- def test_db_sync_new_deployment(self):
- """Mimic a new deployment without existing sqlalchemy-migrate cruft."""
- has_migrate = False
- has_alembic = False
- self._test_db_sync(has_migrate, has_alembic)
-
- def test_db_sync_with_existing_migrate_database(self):
- """Mimic a deployment currently managed by sqlalchemy-migrate."""
- has_migrate = True
- has_alembic = False
- self._test_db_sync(has_migrate, has_alembic)
-
- def test_db_sync_with_existing_alembic_database(self):
- """Mimic a deployment that's already switched to alembic."""
- has_migrate = True
- has_alembic = True
- self._test_db_sync(has_migrate, has_alembic)
+ mock_get_engine.return_value, mock_find_conf.return_value, None,
+ )
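
# Hedged reconstruction of the flow the simplified test_db_sync pins
# down, with helper names taken from the mocks above; not the verbatim
# nova source.
def db_sync(version=None, database='main', context=None):
    engine = _get_engine(database, context=context)
    config = _find_alembic_conf(database)
    # '%' must be doubled so ConfigParser does not treat it as
    # interpolation syntax (hence the '%%2F' in the assertion above)
    config.set_main_option(
        'sqlalchemy.url', str(engine.url).replace('%', '%%'))
    _upgrade_alembic(engine, config, version)
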
@mock.patch.object(alembic_migration.MigrationContext, 'configure')
-@mock.patch.object(migrate_api, 'db_version')
-@mock.patch.object(migration, '_is_database_under_alembic_control')
-@mock.patch.object(migration, '_is_database_under_migrate_control')
@mock.patch.object(migration, '_get_engine')
-@mock.patch.object(migration, '_find_migrate_repo')
class TestDBVersion(test.NoDBTestCase):
- def test_db_version_invalid_databse(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
+ def test_db_version_invalid_database(
+ self, mock_get_engine, mock_m_context_configure,
):
"""We only have two databases."""
self.assertRaises(
exception.Invalid, migration.db_version, database='invalid')
- def test_db_version_migrate(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
- """Database is controlled by sqlalchemy-migrate."""
- mock_is_migrate.return_value = True
- mock_is_alembic.return_value = False
-
- ret = migration.db_version('main')
- self.assertEqual(mock_migrate_version.return_value, ret)
-
- mock_find_repo.assert_called_once_with('main')
- mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_called_once_with(
- mock_get_engine.return_value, mock_find_repo.return_value)
- mock_m_context_configure.assert_not_called()
-
- def test_db_version_alembic(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
+ def test_db_version(self, mock_get_engine, mock_m_context_configure):
"""Database is controlled by alembic."""
- mock_is_migrate.return_value = False
- mock_is_alembic.return_value = True
-
ret = migration.db_version('main')
mock_m_context = mock_m_context_configure.return_value
self.assertEqual(
@@ -194,31 +111,9 @@ class TestDBVersion(test.NoDBTestCase):
ret
)
- mock_find_repo.assert_called_once_with('main')
mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_not_called()
mock_m_context_configure.assert_called_once()
- def test_db_version_not_controlled(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
- """Database is not controlled."""
- mock_is_migrate.return_value = False
- mock_is_alembic.return_value = False
-
- ret = migration.db_version()
- self.assertIsNone(ret)
-
- mock_find_repo.assert_called_once_with('main')
- mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_not_called()
- mock_m_context_configure.assert_not_called()
-
class TestGetEngine(test.NoDBTestCase):
@@ -237,77 +132,3 @@ class TestGetEngine(test.NoDBTestCase):
engine = migration._get_engine('api')
self.assertEqual('engine', engine)
mock_get_engine.assert_called_once_with()
-
-
-class TestDatabaseUnderVersionControl(test.NoDBTestCase):
-
- @mock.patch.object(migrate_api, 'db_version')
- def test__is_database_under_migrate_control__true(self, mock_db_version):
- ret = migration._is_database_under_migrate_control('engine', 'repo')
- self.assertTrue(ret)
-
- mock_db_version.assert_called_once_with('engine', 'repo')
-
- @mock.patch.object(migrate_api, 'db_version')
- def test__is_database_under_migrate_control__false(self, mock_db_version):
- mock_db_version.side_effect = \
- migrate_exceptions.DatabaseNotControlledError()
-
- ret = migration._is_database_under_migrate_control('engine', 'repo')
- self.assertFalse(ret)
-
- mock_db_version.assert_called_once_with('engine', 'repo')
-
- @mock.patch.object(alembic_migration.MigrationContext, 'configure')
- def test__is_database_under_alembic_control__true(self, mock_configure):
- context = mock_configure.return_value
- context.get_current_revision.return_value = 'foo'
- engine = mock.MagicMock()
-
- ret = migration._is_database_under_alembic_control(engine)
- self.assertTrue(ret)
-
- context.get_current_revision.assert_called_once_with()
-
- @mock.patch.object(alembic_migration.MigrationContext, 'configure')
- def test__is_database_under_alembic_control__false(self, mock_configure):
- context = mock_configure.return_value
- context.get_current_revision.return_value = None
- engine = mock.MagicMock()
-
- ret = migration._is_database_under_alembic_control(engine)
- self.assertFalse(ret)
-
- context.get_current_revision.assert_called_once_with()
-
-
-class ProjectTestCase(test.NoDBTestCase):
-
- def test_no_migrations_have_downgrade(self):
- topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
- # Walk both the nova_api and nova (cell) database migrations.
- includes_downgrade = []
- for directory in (
- os.path.join(topdir, 'db', 'main', 'legacy_migrations'),
- os.path.join(topdir, 'db', 'api', 'legacy_migrations'),
- ):
- py_glob = os.path.join(directory, 'versions', '*.py')
- for path in glob.iglob(py_glob):
- has_upgrade = False
- has_downgrade = False
- with open(path, "r") as f:
- for line in f:
- if 'def upgrade(' in line:
- has_upgrade = True
- if 'def downgrade(' in line:
- has_downgrade = True
-
- if has_upgrade and has_downgrade:
- fname = os.path.basename(path)
- includes_downgrade.append(fname)
-
- helpful_msg = (
- "The following migrations have a downgrade "
- "which is not supported:"
- "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
- self.assertFalse(includes_downgrade, helpful_msg)
diff --git a/nova/tests/unit/fake_policy.py b/nova/tests/unit/fake_policy.py
index bfc90e119e..2f8c483554 100644
--- a/nova/tests/unit/fake_policy.py
+++ b/nova/tests/unit/fake_policy.py
@@ -44,6 +44,7 @@ policy_data = """
"os_compute_api:servers:trigger_crash_dump": "",
"os_compute_api:servers:show:host_status": "",
"os_compute_api:servers:show": "",
+    "os_compute_api:servers:show:flavor-extra-specs": "",
"os_compute_api:servers:show:host_status:unknown-only": "",
"os_compute_api:servers:allow_all_filters": "",
"os_compute_api:servers:migrations:force_complete": "",
diff --git a/nova/tests/unit/fixtures/test_libvirt.py b/nova/tests/unit/fixtures/test_libvirt.py
index eab9c54a13..448f8f6720 100644
--- a/nova/tests/unit/fixtures/test_libvirt.py
+++ b/nova/tests/unit/fixtures/test_libvirt.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from lxml import etree
-import mock
from oslo_utils import uuidutils
from nova.objects import fields as obj_fields
diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py
index 4f35f060e4..935a271d44 100644
--- a/nova/tests/unit/image/test_glance.py
+++ b/nova/tests/unit/image/test_glance.py
@@ -18,6 +18,7 @@ import copy
import datetime
import io
from io import StringIO
+from unittest import mock
import urllib.parse as urlparse
import cryptography
@@ -28,7 +29,6 @@ import glanceclient.exc
from glanceclient.v1 import images
from glanceclient.v2 import schemas
from keystoneauth1 import loading as ks_loading
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
diff --git a/nova/tests/unit/virt/powervm/disk/__init__.py b/nova/tests/unit/limit/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/unit/virt/powervm/disk/__init__.py
+++ b/nova/tests/unit/limit/__init__.py
diff --git a/nova/tests/unit/limit/test_local.py b/nova/tests/unit/limit/test_local.py
new file mode 100644
index 0000000000..8bf163d69f
--- /dev/null
+++ b/nova/tests/unit/limit/test_local.py
@@ -0,0 +1,256 @@
+# Copyright 2022 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_limit import exception as limit_exceptions
+from oslo_limit import fixture as limit_fixture
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context
+from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import utils as limit_utils
+from nova import objects
+from nova import test
+
+CONF = cfg.CONF
+
+
+class TestLocalLimits(test.NoDBTestCase):
+ def setUp(self):
+ super(TestLocalLimits, self).setUp()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+ self.context = context.RequestContext()
+
+ def test_enforce_api_limit_metadata(self):
+ # default max is 128
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.SERVER_METADATA_ITEMS: 128}, {}))
+ local_limit.enforce_api_limit(local_limit.SERVER_METADATA_ITEMS, 128)
+
+ e = self.assertRaises(exception.MetadataLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.SERVER_METADATA_ITEMS, 129)
+ msg = ("Resource %s is over limit" % local_limit.SERVER_METADATA_ITEMS)
+ self.assertIn(msg, str(e))
+
+ def test_enforce_api_limit_skip(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ local_limit.enforce_api_limit(local_limit.SERVER_METADATA_ITEMS, 200)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_api_limit_session_init_error(self, mock_util):
+ mock_util.side_effect = limit_exceptions.SessionInitError('error')
+
+ e = self.assertRaises(exception.KeystoneConnectionFailed,
+ local_limit.enforce_api_limit,
+ local_limit.SERVER_METADATA_ITEMS, 42)
+ expected = ('Failed to connect to keystone while enforcing '
+ 'server_metadata_items quota limit.')
+ self.assertIn(expected, str(e))
+
+ def test_enforce_api_limit_raises_for_invalid_entity(self):
+ e = self.assertRaises(ValueError,
+ local_limit.enforce_api_limit,
+ local_limit.KEY_PAIRS, 42)
+ expected = '%s is not a valid API limit: %s' % (
+ local_limit.KEY_PAIRS, local_limit.API_LIMITS)
+ self.assertEqual(expected, str(e))
+
+ def test_enforce_api_limit_no_registered_limit_found(self):
+ self.useFixture(limit_fixture.LimitFixture({}, {}))
+ e = self.assertRaises(exception.MetadataLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.SERVER_METADATA_ITEMS, 42)
+ msg = ("Resource %s is over limit" % local_limit.SERVER_METADATA_ITEMS)
+ self.assertIn(msg, str(e))
+
+ def test_enforce_injected_files(self):
+ reglimits = {local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES, 5)
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES_CONTENT,
+ 10 * 1024)
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES_PATH, 255)
+
+ e = self.assertRaises(exception.OnsetFileLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.INJECTED_FILES, 6)
+ msg = ("Resource %s is over limit" % local_limit.INJECTED_FILES)
+ self.assertIn(msg, str(e))
+ e = self.assertRaises(exception.OnsetFileContentLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.INJECTED_FILES_CONTENT,
+ 10 * 1024 + 1)
+ msg = (
+ "Resource %s is over limit" % local_limit.INJECTED_FILES_CONTENT)
+ self.assertIn(msg, str(e))
+ e = self.assertRaises(exception.OnsetFilePathLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.INJECTED_FILES_PATH, 256)
+ msg = ("Resource %s is over limit" % local_limit.INJECTED_FILES_PATH)
+ self.assertIn(msg, str(e))
+
+ @mock.patch.object(objects.KeyPairList, "get_count_by_user")
+ def test_enforce_db_limit_keypairs(self, mock_count):
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.KEY_PAIRS: 100}, {}))
+
+ mock_count.return_value = 99
+ local_limit.enforce_db_limit(self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 1)
+ mock_count.assert_called_once_with(self.context, uuids.user_id)
+
+ self.assertRaises(exception.KeypairLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 2)
+
+ mock_count.return_value = 100
+ local_limit.enforce_db_limit(self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 0)
+ mock_count.return_value = 101
+ self.assertRaises(exception.KeypairLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 0)
+
+ def test_enforce_db_limit_skip(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ local_limit.enforce_db_limit(self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 1)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_db_limit_session_init_error(self, mock_util):
+ mock_util.side_effect = limit_exceptions.SessionInitError(
+ test.TestingException())
+
+ e = self.assertRaises(exception.KeystoneConnectionFailed,
+ local_limit.enforce_db_limit, self.context,
+ local_limit.KEY_PAIRS, uuids.user_id, 42)
+ expected = ('Failed to connect to keystone while enforcing '
+ 'server_key_pairs quota limit.')
+ self.assertEqual(expected, str(e))
+
+ def test_enforce_db_limit_raise_on_invalid(self):
+ e = self.assertRaises(ValueError, local_limit.enforce_db_limit,
+ self.context, local_limit.INJECTED_FILES,
+ uuids.user_id, 1)
+ fmt = '%s does not have a DB count function defined: %s'
+ expected = fmt % (
+ local_limit.INJECTED_FILES, local_limit.DB_COUNT_FUNCTION.keys())
+ self.assertEqual(expected, str(e))
+
+ @mock.patch.object(objects.KeyPairList, "get_count_by_user")
+ def test_enforce_db_limit_no_registered_limit_found(self, mock_count):
+ self.useFixture(limit_fixture.LimitFixture({}, {}))
+ mock_count.return_value = 5
+ e = self.assertRaises(exception.KeypairLimitExceeded,
+ local_limit.enforce_db_limit, self.context,
+ local_limit.KEY_PAIRS, uuids.user_id, 42)
+ msg = ("Resource %s is over limit" % local_limit.KEY_PAIRS)
+ self.assertIn(msg, str(e))
+
+ def test_enforce_db_limit_raise_bad_delta(self):
+ e = self.assertRaises(ValueError, local_limit.enforce_db_limit,
+ self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, -1)
+ self.assertEqual("delta must be a positive integer", str(e))
+
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_enforce_db_limit_server_groups(self, mock_count):
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.SERVER_GROUPS: 10}, {}))
+
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ local_limit.enforce_db_limit(self.context, local_limit.SERVER_GROUPS,
+ uuids.project_id, 1)
+ mock_count.assert_called_once_with(self.context, uuids.project_id)
+
+ self.assertRaises(exception.ServerGroupLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.SERVER_GROUPS,
+ uuids.project_id, 2)
+
+ @mock.patch.object(objects.InstanceGroup, "get_by_uuid")
+ def test_enforce_db_limit_server_group_members(self, mock_get):
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.SERVER_GROUP_MEMBERS: 10}, {}))
+
+ mock_get.return_value = objects.InstanceGroup(members=[])
+ local_limit.enforce_db_limit(self.context,
+ local_limit.SERVER_GROUP_MEMBERS,
+ uuids.server_group, 10)
+ mock_get.assert_called_once_with(self.context, uuids.server_group)
+
+ self.assertRaises(exception.GroupMemberLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.SERVER_GROUP_MEMBERS,
+ uuids.server_group, 11)
+
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_in_use(self, mock_count):
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ usages = local_limit.get_in_use(self.context, uuids.project_id)
+ expected_usages = {
+ 'injected_file_content_bytes': 0,
+ 'injected_file_path_bytes': 0,
+ 'injected_files': 0,
+ 'key_pairs': 0,
+ 'metadata_items': 0,
+ 'server_group_members': 0,
+ 'server_groups': 9
+ }
+ self.assertEqual(expected_usages, usages)
+
+
+class GetLegacyLimitsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(GetLegacyLimitsTest, self).setUp()
+ self.new = {"server_metadata_items": 1,
+ "server_injected_files": 2,
+ "server_injected_file_content_bytes": 3,
+ "server_injected_file_path_bytes": 4,
+ "server_key_pairs": 5,
+ "server_groups": 6,
+ "server_group_members": 7}
+ self.legacy = {"metadata_items": 1,
+ "injected_files": 2,
+ "injected_file_content_bytes": 3,
+ "injected_file_path_bytes": 4,
+ "key_pairs": 5,
+ "server_groups": 6,
+ "server_group_members": 7}
+ self.resources = list(local_limit.API_LIMITS | local_limit.DB_LIMITS)
+ self.resources.sort()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+
+ def test_convert_keys_to_legacy_name(self):
+ limits = local_limit._convert_keys_to_legacy_name(self.new)
+ self.assertEqual(self.legacy, limits)
+
+ def test_get_legacy_default_limits(self):
+ reglimits = copy.deepcopy(self.new)
+ reglimits.pop('server_key_pairs')
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ limits = local_limit.get_legacy_default_limits()
+ expected = copy.deepcopy(self.legacy)
+ expected['key_pairs'] = 0
+ self.assertEqual(expected, limits)
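
# Hedged, minimal illustration of the oslo.limit machinery these tests
# drive through nova.limit.local: an Enforcer wraps a usage callback,
# and enforce() raises when the current usage plus the delta exceeds the
# registered limit. The callback body is a placeholder, not nova's count
# logic.
from oslo_limit import limit

def _usage_callback(project_id, resource_names):
    # nova's DB-backed limits would call a per-resource count function
    return {name: 0 for name in resource_names}

def check_limit(project_id, resource_name, delta):
    enforcer = limit.Enforcer(_usage_callback)
    enforcer.enforce(project_id, {resource_name: delta})
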
diff --git a/nova/tests/unit/limit/test_placement.py b/nova/tests/unit/limit/test_placement.py
new file mode 100644
index 0000000000..3640890c74
--- /dev/null
+++ b/nova/tests/unit/limit/test_placement.py
@@ -0,0 +1,353 @@
+# Copyright 2022 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_limit import exception as limit_exceptions
+from oslo_limit import fixture as limit_fixture
+from oslo_limit import limit
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context
+from nova import exception
+from nova.limit import placement as placement_limits
+from nova.limit import utils as limit_utils
+from nova import objects
+from nova import quota
+from nova.scheduler.client import report
+from nova import test
+
+CONF = cfg.CONF
+
+
+class TestGetUsage(test.NoDBTestCase):
+ def setUp(self):
+ super(TestGetUsage, self).setUp()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+ self.context = context.RequestContext()
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(report.SchedulerReportClient,
+ "get_usages_counts_for_limits")
+ def test_get_usage(self, mock_placement, mock_inst, mock_qfd):
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB",
+ "class:CUSTOM_BAREMETAL"]
+ mock_qfd.return_value = True
+ mock_placement.return_value = {"VCPU": 1, "CUSTOM_BAREMETAL": 2}
+ mock_inst.return_value = {"project": {"instances": 42}}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
+ 'class:CUSTOM_BAREMETAL': 2}
+ self.assertDictEqual(expected, usage)
+
+ def test_get_usage_bad_resources(self):
+ bad_resource = ["unknown_resource"]
+ self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, bad_resource)
+ bad_class = ["class:UNKNOWN_CLASS"]
+ self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, bad_class)
+ no_resources = []
+ self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, no_resources)
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ def test_get_usage_bad_qfd(self, mock_qfd):
+ mock_qfd.return_value = False
+ resources = ["servers"]
+ e = self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, resources)
+ self.assertEqual("must first migrate instance mappings", str(e))
+
+ def test_get_usage_unified_limits_disabled(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ e = self.assertRaises(NotImplementedError, placement_limits._get_usage,
+ self.context, uuids.project, [])
+ self.assertEqual("Unified limits support is disabled", str(e))
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(report.SchedulerReportClient,
+ 'get_usages_counts_for_limits')
+ def test_get_usage_placement_fail(self, mock_placement, mock_inst,
+ mock_qfd):
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB",
+ "class:CUSTOM_BAREMETAL"]
+ mock_qfd.return_value = True
+ mock_placement.side_effect = exception.UsagesRetrievalFailed(
+ project_id=uuids.project, user_id=uuids.user)
+ mock_inst.return_value = {"project": {"instances": 42}}
+
+ e = self.assertRaises(
+ exception.UsagesRetrievalFailed, placement_limits._get_usage,
+ self.context, uuids.project, resources)
+
+ expected = ("Failed to retrieve usages from placement while enforcing "
+ "%s quota limits." % ", ".join(resources))
+ self.assertEqual(expected, str(e))
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(report.SchedulerReportClient,
+ "get_usages_counts_for_limits")
+ def test_get_usage_pcpu_as_vcpu(self, mock_placement, mock_inst, mock_qfd):
+ # Test that when configured, PCPU count is merged into VCPU count when
+ # appropriate.
+ self.flags(unified_limits_count_pcpu_as_vcpu=True, group="workarounds")
+ mock_qfd.return_value = True
+ mock_inst.return_value = {"project": {"instances": 42}}
+
+ # PCPU was not specified in the flavor but usage was found in
+ # placement. PCPU count should be merged into VCPU count.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 3, 'servers': 42}
+ self.assertDictEqual(expected, usage)
+
+ # PCPU was not specified in the flavor and usage was found in placement
+ # and there was no VCPU usage in placement. The PCPU count should be
+ # returned as VCPU count.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ mock_placement.return_value = {"PCPU": 1}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
+ self.assertDictEqual(expected, usage)
+
+ # PCPU was not specified in the flavor but only VCPU usage was found in
+ # placement.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ mock_placement.return_value = {"VCPU": 1}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
+ self.assertDictEqual(expected, usage)
+
+ # PCPU was specified in the flavor, so the counts should be separate.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB", "class:PCPU"]
+ mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
+ 'class:PCPU': 2}
+ self.assertDictEqual(expected, usage)
+
+
+class TestGetDeltas(test.NoDBTestCase):
+ def test_get_deltas(self):
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
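+        # With swap=0, DISK_GB per instance is root_gb + ephemeral_gb,
+        # i.e. (5 + 2) * 2 instances = 14.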
+ deltas = placement_limits._get_deltas_by_flavor(flavor, False, 2)
+
+ expected = {'servers': 2,
+ 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 14}
+ self.assertDictEqual(expected, deltas)
+
+ def test_get_deltas_recheck(self):
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
+ deltas = placement_limits._get_deltas_by_flavor(flavor, False, 0)
+
+ expected = {'servers': 0,
+ 'class:VCPU': 0, 'class:MEMORY_MB': 0,
+ 'class:DISK_GB': 0}
+ self.assertDictEqual(expected, deltas)
+
+ def test_get_deltas_check_baremetal(self):
+ extra_specs = {"resources:VCPU": 0, "resources:MEMORY_MB": 0,
+ "resources:DISK_GB": 0, "resources:CUSTOM_BAREMETAL": 1}
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5,
+ extra_specs=extra_specs)
+
+ deltas = placement_limits._get_deltas_by_flavor(flavor, True, 1)
+
+ expected = {'servers': 1, 'class:CUSTOM_BAREMETAL': 1}
+ self.assertDictEqual(expected, deltas)
+
+ def test_get_deltas_check_bfv(self):
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
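+        # Boot-from-volume instances consume no local root disk, so
+        # only ephemeral_gb counts: 2 * 2 instances = 4.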
+ deltas = placement_limits._get_deltas_by_flavor(flavor, True, 2)
+
+ expected = {'servers': 2,
+ 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 4}
+ self.assertDictEqual(expected, deltas)
+
+
+class TestEnforce(test.NoDBTestCase):
+ def setUp(self):
+ super(TestEnforce, self).setUp()
+ self.context = context.RequestContext()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+
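+        # Replace the cached module-level enforcer so no real
+        # oslo.limit Enforcer is constructed by these tests.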
+ placement_limits._ENFORCER = mock.Mock(limit.Enforcer)
+ self.flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
+ def test_enforce_num_instances_and_flavor_disabled(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, "flavor", False, 0, 42)
+ self.assertEqual(42, count)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, self.flavor, False, 0, 2)
+
+ self.assertEqual(2, count)
+ mock_limit.assert_called_once_with(mock.ANY)
+ mock_enforcer.enforce.assert_called_once_with(
+ uuids.project_id,
+ {'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 14})
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_recheck(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+
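+        # A quota recheck passes max_count=0, so enforcement runs with
+        # zero deltas to validate existing usage without reserving more.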
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, self.flavor, False, 0, 0)
+
+ self.assertEqual(0, count)
+ mock_limit.assert_called_once_with(mock.ANY)
+ mock_enforcer.enforce.assert_called_once_with(
+ uuids.project_id,
+ {'servers': 0, 'class:VCPU': 0, 'class:MEMORY_MB': 0,
+ 'class:DISK_GB': 0})
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_retry(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+ over_limit_info_list = [
+ limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 30)
+ ]
+ mock_enforcer.enforce.side_effect = [
+ limit_exceptions.ProjectOverLimit(
+ uuids.project_id, over_limit_info_list),
+ None]
+
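+        # The first enforce call (for 3 instances) is over limit; the
+        # code retries with smaller counts until enforcement passes.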
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, self.flavor, True, 0, 3)
+
+ self.assertEqual(2, count)
+ self.assertEqual(2, mock_enforcer.enforce.call_count)
+ mock_enforcer.enforce.assert_called_with(
+ uuids.project_id,
+ {'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 4})
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_fails(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+ over_limit_info_list = [
+ limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 20),
+ limit_exceptions.OverLimitInfo("servers", 2, 1, 2)
+ ]
+ expected = limit_exceptions.ProjectOverLimit(uuids.project_id,
+ over_limit_info_list)
+ mock_enforcer.enforce.side_effect = expected
+
+ # Verify that the oslo.limit ProjectOverLimit gets translated to a
+        # TooManyInstances that the API knows how to handle.
+ e = self.assertRaises(
+ exception.TooManyInstances,
+ placement_limits.enforce_num_instances_and_flavor, self.context,
+ uuids.project_id, self.flavor, True, 2, 4)
+
+ self.assertEqual(str(expected), str(e))
+ self.assertEqual(3, mock_enforcer.enforce.call_count)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_placement_fail(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+ mock_enforcer.enforce.side_effect = exception.UsagesRetrievalFailed(
+ 'Failed to retrieve usages')
+
+ e = self.assertRaises(
+ exception.UsagesRetrievalFailed,
+ placement_limits.enforce_num_instances_and_flavor, self.context,
+ uuids.project, self.flavor, True, 0, 5)
+
+ expected = str(mock_enforcer.enforce.side_effect)
+ self.assertEqual(expected, str(e))
+
+
+class GetLegacyLimitsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(GetLegacyLimitsTest, self).setUp()
+ self.new = {"servers": 1, "class:VCPU": 2, "class:MEMORY_MB": 3}
+ self.legacy = {"instances": 1, "cores": 2, "ram": 3}
+ self.resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ self.resources.sort()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+
+ def test_convert_keys_to_legacy_name(self):
+ limits = placement_limits._convert_keys_to_legacy_name(self.new)
+ self.assertEqual(self.legacy, limits)
+
+ def test_get_legacy_default_limits(self):
+ reglimits = {'servers': 1, 'class:VCPU': 2}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
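+        # 'ram' is 0 because class:MEMORY_MB has no registered limit.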
+ limits = placement_limits.get_legacy_default_limits()
+ self.assertEqual({'cores': 2, 'instances': 1, 'ram': 0}, limits)
+
+ def test_get_legacy_project_limits(self):
+ reglimits = {'servers': 5, 'class:MEMORY_MB': 7}
+ projlimits = {uuids.project_id: {'servers': 1}}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, projlimits))
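+        # The project override for 'servers' (1) beats the registered
+        # default (5); 'cores' is 0 as class:VCPU has no limit at all.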
+ limits = placement_limits.get_legacy_project_limits(uuids.project_id)
+ self.assertEqual({'instances': 1, 'cores': 0, 'ram': 7}, limits)
+
+ @mock.patch.object(report.SchedulerReportClient,
+ "get_usages_counts_for_limits")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(quota, "is_qfd_populated")
+ def test_get_legacy_counts(self, mock_qfd, mock_counts, mock_placement):
+ mock_qfd.return_value = True
+ mock_counts.return_value = {"project": {"instances": 1}}
+ mock_placement.return_value = {
+ "VCPU": 2, "CUSTOM_BAREMETAL": 2, "MEMORY_MB": 3,
+ }
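+        # Only instances/cores/ram have legacy equivalents; the
+        # CUSTOM_BAREMETAL usage is dropped by the conversion.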
+ counts = placement_limits.get_legacy_counts(
+ "context", uuids.project_id)
+ self.assertEqual(self.legacy, counts)
diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py
index 0420e2d791..1c604975b0 100644
--- a/nova/tests/unit/network/test_network_info.py
+++ b/nova/tests/unit/network/test_network_info.py
@@ -738,6 +738,52 @@ iface eth0 inet6 static
template = self._setup_injected_network_scenario(use_ipv4=False)
self.assertEqual(expected, template)
+ def test_injection_ipv6_only(self):
+ expected = '''\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet6 static
+ hwaddress ether aa:aa:aa:aa:aa:aa
+ address 1234:567::2
+ netmask 48
+ gateway 1234:567::1
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+'''
+ template = self._setup_injected_network_scenario(use_ipv4=False,
+ use_ipv6=True)
+ self.assertEqual(expected, template)
+
+ def test_injection_ipv6_only_no_gateway(self):
+ expected = '''\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet6 static
+ hwaddress ether aa:aa:aa:aa:aa:aa
+ address 1234:567::2
+ netmask 48
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+'''
+ template = self._setup_injected_network_scenario(use_ipv4=False,
+ use_ipv6=True,
+ gateway=False)
+ self.assertEqual(expected, template)
+
def test_injection_ipv6_two_interfaces(self):
expected = """\
# Injected by Nova on instance boot
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 5056b70c4e..9aa970aca1 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -16,11 +16,11 @@
import collections
import copy
+from unittest import mock
from keystoneauth1.fixture import V2Token
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
-import mock
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo_config import cfg
@@ -39,9 +39,9 @@ from nova.network import constants
from nova.network import model
from nova.network import neutron as neutronapi
from nova import objects
+from nova.objects import fields as obj_fields
from nova.objects import network_request as net_req_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
@@ -514,7 +514,11 @@ class TestAPIBase(test.TestCase):
has_dns_extension = False
if kwargs.get('dns_extension'):
has_dns_extension = True
- self.api.extensions[constants.DNS_INTEGRATION] = 1
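+            # The extension cache is a dict keyed by alias, matching
+            # the entries returned by list_extensions.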
+ self.api.extensions = {
+ constants.DNS_INTEGRATION: {
+ 'alias': constants.DNS_INTEGRATION,
+ },
+ }
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
@@ -1166,35 +1170,14 @@ class TestAPI(TestAPIBase):
mock_get_physnet.assert_called_once_with(
mock.ANY, mock.ANY, self.port_data1[0]['network_id'])
- @mock.patch.object(neutronapi, 'get_client')
- def test_refresh_neutron_extensions_cache(self, mock_get_client):
+ def test_refresh_neutron_extensions_cache(self):
mocked_client = mock.create_autospec(client.Client)
- mock_get_client.return_value = mocked_client
mocked_client.list_extensions.return_value = {
- 'extensions': [{'name': constants.QOS_QUEUE}]}
- self.api._refresh_neutron_extensions_cache(self.context)
+ 'extensions': [{'alias': constants.DNS_INTEGRATION}]}
+ self.api._refresh_neutron_extensions_cache(mocked_client)
self.assertEqual(
- {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
+ {constants.DNS_INTEGRATION: {'alias': constants.DNS_INTEGRATION}},
self.api.extensions)
- mock_get_client.assert_called_once_with(self.context)
- mocked_client.list_extensions.assert_called_once_with()
-
- @mock.patch.object(neutronapi, 'get_client')
- def test_populate_neutron_extension_values_rxtx_factor(
- self, mock_get_client):
- mocked_client = mock.create_autospec(client.Client)
- mock_get_client.return_value = mocked_client
- mocked_client.list_extensions.return_value = {
- 'extensions': [{'name': constants.QOS_QUEUE}]}
- flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
- flavor['rxtx_factor'] = 1
- instance = objects.Instance(system_metadata={})
- instance.flavor = flavor
- port_req_body = {'port': {}}
- self.api._populate_neutron_extension_values(self.context, instance,
- None, port_req_body)
- self.assertEqual(1, port_req_body['port']['rxtx_factor'])
- mock_get_client.assert_called_once_with(self.context)
mocked_client.list_extensions.assert_called_once_with()
def test_allocate_for_instance_1(self):
@@ -2414,9 +2397,13 @@ class TestAPI(TestAPIBase):
mock_nc.show_port.side_effect = exceptions.PortNotFoundClient
if fip_ext_enabled:
- self.api.extensions = [constants.FIP_PORT_DETAILS]
+ self.api.extensions = {
+ constants.FIP_PORT_DETAILS: {
+ 'alias': constants.FIP_PORT_DETAILS,
+ },
+ }
else:
- self.api.extensions = []
+ self.api.extensions = {}
fip = self.api.get_floating_ip(self.context, uuids.fip_id)
@@ -2489,9 +2476,13 @@ class TestAPI(TestAPIBase):
mock_nc.show_port.side_effect = exceptions.PortNotFoundClient
if fip_ext_enabled:
- self.api.extensions = [constants.FIP_PORT_DETAILS]
+ self.api.extensions = {
+ constants.FIP_PORT_DETAILS: {
+ 'alias': constants.FIP_PORT_DETAILS,
+ },
+ }
else:
- self.api.extensions = []
+ self.api.extensions = {}
fip = self.api.get_floating_ip_by_address(self.context, '172.1.2.3')
@@ -3391,6 +3382,155 @@ class TestAPI(TestAPIBase):
mocked_client.list_ports.assert_called_once_with(
tenant_id=uuids.fake, device_id=uuids.instance)
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_full_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_single_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ fake_nets = [
+ {
+ "id": "net-id",
+ "name": "foo",
+ "tenant_id": uuids.fake,
+ }
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
@mock.patch.object(neutronapi, 'get_client')
def test_get_subnets_from_port(self, mock_get_client):
mocked_client = mock.create_autospec(client.Client)
@@ -3473,7 +3613,7 @@ class TestAPI(TestAPIBase):
'provider:network_type': 'vxlan'}]}}
test_ext_list = {'extensions':
[{'name': 'Multi Provider Network',
- 'alias': 'multi-segments'}]}
+ 'alias': 'multi-provider'}]}
mock_client = mock_get_client.return_value
mock_client.list_extensions.return_value = test_ext_list
@@ -3494,7 +3634,7 @@ class TestAPI(TestAPIBase):
'provider:network_type': 'vlan'}}
test_ext_list = {'extensions':
[{'name': 'Multi Provider Network',
- 'alias': 'multi-segments'}]}
+ 'alias': 'multi-provider'}]}
mock_client = mock_get_client.return_value
mock_client.list_extensions.return_value = test_ext_list
@@ -3520,7 +3660,7 @@ class TestAPI(TestAPIBase):
'provider:network_type': 'vlan'}]}}
test_ext_list = {'extensions':
[{'name': 'Multi Provider Network',
- 'alias': 'multi-segments'}]}
+ 'alias': 'multi-provider'}]}
mock_client = mock_get_client.return_value
mock_client.list_extensions.return_value = test_ext_list
@@ -3565,6 +3705,23 @@ class TestAPI(TestAPIBase):
self.assertFalse(tunneled)
self.assertIsNone(physnet_name)
+ def test_is_remote_managed(self):
+ cases = {
+ (model.VNIC_TYPE_NORMAL, False),
+ (model.VNIC_TYPE_DIRECT, False),
+ (model.VNIC_TYPE_MACVTAP, False),
+ (model.VNIC_TYPE_DIRECT_PHYSICAL, False),
+ (model.VNIC_TYPE_BAREMETAL, False),
+ (model.VNIC_TYPE_VIRTIO_FORWARDER, False),
+ (model.VNIC_TYPE_VDPA, False),
+ (model.VNIC_TYPE_ACCELERATOR_DIRECT, False),
+ (model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL, False),
+ (model.VNIC_TYPE_REMOTE_MANAGED, True),
+ }
+
+ for vnic_type, expected in cases:
+            self.assertEqual(expected, self.api._is_remote_managed(vnic_type))
+
def _test_get_port_vnic_info(
self, mock_get_client, binding_vnic_type, expected_vnic_type,
port_resource_request=None, numa_policy=None
@@ -3711,6 +3868,27 @@ class TestAPI(TestAPIBase):
count = self.api.validate_networks(self.context, requested_networks, 1)
self.assertEqual(1, count)
+ @mock.patch('nova.network.neutron.API._show_port')
+ def test_deferred_ip_port_none_allocation(self, mock_show):
+ """Test behavior when the 'none' IP allocation policy is used."""
+ port = {
+ 'network_id': 'my_netid1',
+ 'device_id': None,
+ 'id': uuids.port,
+ 'fixed_ips': [], # no fixed ip
+ 'ip_allocation': 'none',
+ 'binding:vif_details': {
+ 'connectivity': 'l2',
+ },
+ }
+
+ mock_show.return_value = port
+
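+        # With ip_allocation=none and L2 connectivity, the port needs
+        # no fixed IP, so validation should still count it.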
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port['id'])])
+ count = self.api.validate_networks(self.context, requested_networks, 1)
+ self.assertEqual(1, count)
+
@mock.patch('oslo_concurrency.lockutils.lock')
def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
instance = objects.Instance(uuid=uuids.fake)
@@ -4356,7 +4534,7 @@ class TestAPI(TestAPIBase):
def test_update_instance_vnic_index(self, mock_get_client,
mock_refresh_extensions):
api = neutronapi.API()
- api.extensions = set([constants.VNIC_INDEX_EXT])
+ api.extensions = set([constants.VNIC_INDEX])
mock_client = mock_get_client.return_value
mock_client.update_port.return_value = 'port'
@@ -4381,7 +4559,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock
):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
# We pass in a port profile which has a migration attribute and also
# a second port profile attribute 'fake_profile' this can be
@@ -4425,7 +4603,7 @@ class TestAPI(TestAPIBase):
value is None.
"""
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
fake_ports = {'ports': [
{'id': uuids.portid,
@@ -4477,16 +4655,16 @@ class TestAPI(TestAPIBase):
'device_owner': 'compute:%s' %
instance.availability_zone}})
+ @mock.patch.object(neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={}))
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
- def test_update_port_bindings_for_instance_with_pci(self,
- get_client_mock,
- get_pci_device_devspec_mock):
-
+ def test_update_port_bindings_for_instance_with_pci(
+ self, get_client_mock, get_pci_device_devspec_mock):
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
get_pci_device_devspec_mock.return_value = devspec
@@ -4494,17 +4672,21 @@ class TestAPI(TestAPIBase):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = objects.MigrationContext()
instance.migration_context.old_pci_devices = objects.PciDeviceList(
- objects=[objects.PciDevice(vendor_id='1377',
- product_id='0047',
- address='0000:0a:00.1',
- compute_node_id=1,
- request_id='1234567890')])
+ objects=[objects.PciDevice(
+ vendor_id='1377',
+ product_id='0047',
+ address='0000:0a:00.1',
+ compute_node_id=1,
+ request_id='1234567890',
+ dev_type=obj_fields.PciDeviceType.SRIOV_VF)])
instance.migration_context.new_pci_devices = objects.PciDeviceList(
- objects=[objects.PciDevice(vendor_id='1377',
- product_id='0047',
- address='0000:0b:00.1',
- compute_node_id=2,
- request_id='1234567890')])
+ objects=[objects.PciDevice(
+ vendor_id='1377',
+ product_id='0047',
+ address='0000:0b:00.1',
+ compute_node_id=2,
+ request_id='1234567890',
+ dev_type=obj_fields.PciDeviceType.SRIOV_VF)])
instance.pci_devices = instance.migration_context.old_pci_devices
# Validate that non-direct port aren't updated (fake-port-2).
@@ -4597,7 +4779,7 @@ class TestAPI(TestAPIBase):
def test_update_port_bindings_for_instance_with_pci_no_migration(self,
get_client_mock,
get_pci_device_devspec_mock):
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
@@ -4647,7 +4829,7 @@ class TestAPI(TestAPIBase):
def test_update_port_bindings_for_instance_with_same_host_failed_vif_type(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
list_ports_mock = mock.Mock()
update_port_mock = mock.Mock()
@@ -4692,7 +4874,7 @@ class TestAPI(TestAPIBase):
def test_update_port_bindings_for_instance_with_diff_host_unbound_vif_type(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
binding_profile = {'fake_profile': 'fake_data',
constants.MIGRATING_ATTR: 'my-dest-host'}
@@ -4775,6 +4957,174 @@ class TestAPI(TestAPIBase):
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
+ def test_update_port_bindings_for_instance_with_sriov_pf(
+ self, get_client_mock, get_pci_device_devspec_mock
+ ):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ get_pci_device_devspec_mock.return_value = devspec
+
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.migration_context = objects.MigrationContext()
+ instance.migration_context.old_pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:01',
+ compute_node_id=1,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ ]
+ )
+ instance.pci_devices = instance.migration_context.old_pci_devices
+ instance.migration_context.new_pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:02',
+ compute_node_id=2,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:dd'},
+ )
+ ]
+ )
+ instance.pci_devices = instance.migration_context.new_pci_devices
+
+ fake_ports = {
+ 'ports': [
+ {
+ 'id': uuids.port,
+ 'binding:vnic_type': 'direct-physical',
+ constants.BINDING_HOST_ID: 'fake-host-old',
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:01',
+ 'physical_network': 'old_phys_net',
+ 'pci_vendor_info': 'old_pci_vendor_info',
+ },
+ },
+ ]
+ }
+
+ migration = objects.Migration(
+ status='confirmed', migration_type='migration')
+ list_ports_mock = mock.Mock(return_value=fake_ports)
+ get_client_mock.return_value.list_ports = list_ports_mock
+
+ update_port_mock = mock.Mock()
+ get_client_mock.return_value.update_port = update_port_mock
+
+ self.api._update_port_binding_for_instance(
+ self.context, instance, instance.host, migration)
+
+        # Assert that update_port is called with the binding:profile
+        # corresponding to the PCI device specified, including MAC address.
+ update_port_mock.assert_called_once_with(
+ uuids.port,
+ {
+ 'port': {
+ constants.BINDING_HOST_ID: 'fake-host',
+ 'device_owner': 'compute:%s' % instance.availability_zone,
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:02',
+ 'physical_network': 'physnet1',
+ 'pci_vendor_info': '8086:154d',
+ 'device_mac_address': 'b4:96:91:34:f4:dd',
+ },
+ }
+ },
+ )
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_extended_resource_request_extension',
+ new=mock.Mock(return_value=False),
+ )
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
+ def test_update_port_bindings_for_instance_with_sriov_pf_no_migration(
+ self, get_client_mock, get_pci_device_devspec_mock
+ ):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ get_pci_device_devspec_mock.return_value = devspec
+
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.pci_requests = objects.InstancePCIRequests(
+ instance_uuid=instance.uuid,
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port,
+ request_id=uuids.pci_req,
+ )
+ ],
+ )
+ instance.pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:02',
+ compute_node_id=2,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ ]
+ )
+
+ fake_ports = {
+ 'ports': [
+ {
+ 'id': uuids.port,
+ 'binding:vnic_type': 'direct-physical',
+ constants.BINDING_HOST_ID: 'fake-host-old',
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:01',
+ 'physical_network': 'old_phys_net',
+ 'pci_vendor_info': 'old_pci_vendor_info',
+ 'device_mac_address': 'b4:96:91:34:f4:dd'
+ },
+ },
+ ]
+ }
+
+ list_ports_mock = mock.Mock(return_value=fake_ports)
+ get_client_mock.return_value.list_ports = list_ports_mock
+
+ update_port_mock = mock.Mock()
+ get_client_mock.return_value.update_port = update_port_mock
+
+ self.api._update_port_binding_for_instance(
+ self.context, instance, instance.host)
+
+        # Assert that update_port is called with the binding:profile
+        # corresponding to the PCI device specified, including MAC address.
+ update_port_mock.assert_called_once_with(
+ uuids.port,
+ {
+ 'port': {
+ constants.BINDING_HOST_ID: 'fake-host',
+ 'device_owner': 'compute:%s' % instance.availability_zone,
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:02',
+ 'physical_network': 'physnet1',
+ 'pci_vendor_info': '8086:154d',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ },
+ }
+ },
+ )
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_extended_resource_request_extension',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_resource_req(
self, get_client_mock):
@@ -4982,7 +5332,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
# We test with an instance host and destination_host where the
# port will be moving.
get_ports = {'ports': [
@@ -5012,7 +5362,7 @@ class TestAPI(TestAPIBase):
destination host and the binding:profile is None in the port.
"""
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
# We test with an instance host and destination_host where the
# port will be moving but with binding:profile set to None.
get_ports = {
@@ -5043,7 +5393,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
@@ -5063,7 +5413,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
get_ports = {'ports': [
{'id': uuids.port_id,
constants.BINDING_HOST_ID: instance.host}]}
@@ -5099,10 +5449,10 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {
constants.MIGRATING_ATTR: 'new-host'}
- # Pass a port with an migration porfile attribute.
+        # Pass a port with a migration profile attribute.
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
@@ -5111,8 +5461,9 @@ class TestAPI(TestAPIBase):
self.api.list_ports = mock.Mock(return_value=get_ports)
mocked_client = get_client_mock.return_value
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.setup_networks_on_host(self.context,
instance,
host='new-host',
@@ -5130,10 +5481,10 @@ class TestAPI(TestAPIBase):
which is raised through to the caller.
"""
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {
constants.MIGRATING_ATTR: 'new-host'}
- # Pass a port with an migration porfile attribute.
+        # Pass a port with a migration profile attribute.
get_ports = {
'ports': [
{'id': uuids.port1,
@@ -5148,8 +5499,9 @@ class TestAPI(TestAPIBase):
mocked_client = get_client_mock.return_value
mocked_client.delete_port_binding.side_effect = NeutronError
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
ex = self.assertRaises(
exception.PortBindingDeletionFailed,
self.api.setup_networks_on_host,
@@ -5171,15 +5523,15 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
- # Pass a port without any migration porfile attribute.
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
+ # Pass a port without any migration profile attribute.
get_ports = {'ports': [
{'id': uuids.port_id,
constants.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
- with mock.patch.object(self.api, 'supports_port_binding_extension',
+ with mock.patch.object(self.api, 'has_port_binding_extension',
return_value=False):
self.api.setup_networks_on_host(self.context,
instance,
@@ -5212,7 +5564,8 @@ class TestAPI(TestAPIBase):
self.assertEqual(['2', '3'], result, "Invalid preexisting ports")
@mock.patch('nova.network.neutron.API._show_port')
- def _test_unbind_ports_get_client(self, mock_neutron, mock_show):
+ @mock.patch('nova.network.neutron.get_client')
+ def test_unbind_ports_get_client(self, mock_neutron, mock_show):
mock_ctx = mock.Mock(is_admin=False)
ports = ["1", "2", "3"]
@@ -5228,23 +5581,18 @@ class TestAPI(TestAPIBase):
self.assertEqual(1, mock_neutron.call_count)
mock_neutron.assert_has_calls(get_client_calls, True)
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports_get_client_binding_extension(self,
- mock_neutron):
- self._test_unbind_ports_get_client(mock_neutron)
-
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports_get_client(self, mock_neutron):
- self._test_unbind_ports_get_client(mock_neutron)
-
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
- def _test_unbind_ports(self, mock_neutron, mock_show):
+ @mock.patch('nova.network.neutron.get_client')
+ def test_unbind_ports(self, mock_neutron, mock_show):
mock_client = mock.Mock()
mock_update_port = mock.Mock()
mock_client.update_port = mock_update_port
mock_ctx = mock.Mock(is_admin=False)
ports = ["1", "2", "3"]
mock_show.side_effect = [{"id": "1"}, {"id": "2"}, {"id": "3"}]
+
api = neutronapi.API()
api._unbind_ports(mock_ctx, ports, mock_neutron, mock_client)
@@ -5258,14 +5606,6 @@ class TestAPI(TestAPIBase):
self.assertEqual(3, mock_update_port.call_count)
mock_update_port.assert_has_calls(update_port_calls)
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports_binding_ext(self, mock_neutron):
- self._test_unbind_ports(mock_neutron)
-
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports(self, mock_neutron):
- self._test_unbind_ports(mock_neutron)
-
def test_unbind_ports_no_port_ids(self):
# Tests that None entries in the ports list are filtered out.
mock_client = mock.Mock()
@@ -5279,7 +5619,11 @@ class TestAPI(TestAPIBase):
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
- new=mock.Mock()
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True),
)
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch('nova.network.neutron.excutils')
@@ -5822,9 +6166,13 @@ class TestAPI(TestAPIBase):
mock_nc.list_ports.return_value = {'ports': []}
if fip_ext_enabled:
- self.api.extensions = [constants.FIP_PORT_DETAILS]
+ self.api.extensions = {
+ constants.FIP_PORT_DETAILS: {
+ 'alias': constants.FIP_PORT_DETAILS,
+ },
+ }
else:
- self.api.extensions = []
+ self.api.extensions = {}
fips = self.api.get_floating_ips_by_project(self.context)
@@ -5857,6 +6205,8 @@ class TestAPI(TestAPIBase):
"""Make sure we don't fail for floating IPs without attached ports."""
self._test_get_floating_ips_by_project(False, False)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_dns_name_by_admin(self, mock_show):
neutron = mock.Mock()
@@ -5867,7 +6217,6 @@ class TestAPI(TestAPIBase):
}
}
port_client = mock.Mock()
- self.api.extensions = [constants.DNS_INTEGRATION]
ports = [uuids.port_id]
mock_show.return_value = {'id': uuids.port}
self.api._unbind_ports(self.context, ports, neutron, port_client)
@@ -5880,6 +6229,8 @@ class TestAPI(TestAPIBase):
uuids.port_id, port_req_body)
neutron.update_port.assert_not_called()
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_dns_name_by_non_admin(self, mock_show):
neutron = mock.Mock()
@@ -5890,7 +6241,6 @@ class TestAPI(TestAPIBase):
}
}
port_client = mock.Mock()
- self.api.extensions = [constants.DNS_INTEGRATION]
ports = [uuids.port_id]
mock_show.return_value = {'id': uuids.port}
self.api._unbind_ports(self.context, ports, neutron, port_client)
@@ -5904,6 +6254,8 @@ class TestAPI(TestAPIBase):
neutron.update_port.assert_called_once_with(
uuids.port_id, non_admin_port_req_body)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_allocation_in_port_binding(self, mock_show):
neutron = mock.Mock()
@@ -5919,6 +6271,8 @@ class TestAPI(TestAPIBase):
port_client.update_port.assert_called_once_with(
uuids.port_id, port_req_body)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_binding_profile(self, mock_show):
neutron = mock.Mock()
@@ -5928,20 +6282,22 @@ class TestAPI(TestAPIBase):
'id': uuids.port,
'binding:profile': {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
+ 'card_serial_number': 'MT2113X00000',
'physical_network': 'physnet1',
'capabilities': ['switchdev']}
}
self.api._unbind_ports(self.context, ports, neutron, port_client)
port_req_body = {'port': {'binding:host_id': None,
'binding:profile':
- {'physical_network': 'physnet1',
- 'capabilities': ['switchdev']},
+ {'capabilities': ['switchdev']},
'device_id': '',
'device_owner': ''}
}
port_client.update_port.assert_called_once_with(
uuids.port_id, port_req_body)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._populate_neutron_extension_values')
@mock.patch('nova.network.neutron.API._update_port',
# called twice, fails on the 2nd call and triggers the cleanup
@@ -6014,7 +6370,6 @@ class TestAPI(TestAPIBase):
def test_unbind_ports_port_show_portnotfound(self, mock_log, mock_show):
api = neutronapi.API()
neutron_client = mock.Mock()
- mock_show.return_value = {'id': uuids.port}
api._unbind_ports(self.context, [uuids.port_id],
neutron_client, neutron_client)
mock_show.assert_called_once_with(
@@ -6023,6 +6378,65 @@ class TestAPI(TestAPIBase):
neutron_client=mock.ANY)
mock_log.assert_not_called()
+ @mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False),
+ )
+ @mock.patch('nova.network.neutron.API._show_port')
+ @mock.patch.object(neutronapi, 'LOG')
+ def test_unbind_ports_port_show_portnotfound_multiple_ports(
+ self, mock_log, mock_show,
+ ):
+ """Ensure we continue unbinding ports even when one isn't found."""
+ mock_show.side_effect = [
+ exception.PortNotFound(port_id=uuids.port_a),
+ {'id': uuids.port_b},
+ ]
+ api = neutronapi.API()
+ neutron_client = mock.Mock()
+
+ api._unbind_ports(
+ self.context,
+ [uuids.port_a, uuids.port_b],
+ neutron_client,
+ neutron_client,
+ )
+
+ mock_show.assert_has_calls(
+ [
+ mock.call(
+ self.context,
+ uuids.port_a,
+ fields=['binding:profile', 'network_id'],
+ neutron_client=neutron_client,
+ ),
+ mock.call(
+ self.context,
+ uuids.port_b,
+ fields=['binding:profile', 'network_id'],
+ neutron_client=neutron_client,
+ ),
+ ]
+ )
+ # Only the port that exists should be updated
+ neutron_client.update_port.assert_called_once_with(
+ uuids.port_b,
+ {
+ 'port': {
+ 'device_id': '',
+ 'device_owner': '',
+ 'binding:profile': {},
+ 'binding:host_id': None,
+ }
+ }
+ )
+ mock_log.exception.assert_not_called()
+ mock_log.debug.assert_called_with(
+ 'Unable to show port %s as it no longer exists.', uuids.port_a,
+ )
+
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port',
side_effect=Exception)
@mock.patch.object(neutronapi.LOG, 'exception')
@@ -6040,9 +6454,11 @@ class TestAPI(TestAPIBase):
'binding:profile': {}, 'binding:host_id': None}})
self.assertTrue(mock_log.called)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
@mock.patch.object(neutronapi.LOG, 'exception')
- def test_unbind_ports_portnotfound(self, mock_log, mock_show):
+ def test_unbind_ports_port_update_portnotfound(self, mock_log, mock_show):
api = neutronapi.API()
neutron_client = mock.Mock()
neutron_client.update_port = mock.Mock(
@@ -6056,9 +6472,13 @@ class TestAPI(TestAPIBase):
'binding:profile': {}, 'binding:host_id': None}})
mock_log.assert_not_called()
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
@mock.patch.object(neutronapi.LOG, 'exception')
- def test_unbind_ports_unexpected_error(self, mock_log, mock_show):
+ def test_unbind_ports_port_update_unexpected_error(
+ self, mock_log, mock_show,
+ ):
api = neutronapi.API()
neutron_client = mock.Mock()
neutron_client.update_port = mock.Mock(
@@ -6140,7 +6560,8 @@ class TestAPI(TestAPIBase):
objects.NetworkRequest(port_id=uuids.portid_4),
objects.NetworkRequest(port_id=uuids.portid_5),
objects.NetworkRequest(port_id=uuids.trusted_port),
- objects.NetworkRequest(port_id=uuids.portid_vdpa)])
+ objects.NetworkRequest(port_id=uuids.portid_vdpa),
+ objects.NetworkRequest(port_id=uuids.portid_remote_managed)])
pci_requests = objects.InstancePCIRequests(requests=[])
-        # _get_port_vnic_info should be called for every NetworkRequest with a
-        # port_id attribute (so six times)
+        # _get_port_vnic_info should be called for every NetworkRequest with a
+        # port_id attribute (so seven times)
@@ -6154,13 +6575,14 @@ class TestAPI(TestAPIBase):
(model.VNIC_TYPE_DIRECT, True, 'netN',
mock.sentinel.resource_request2, None, None),
(model.VNIC_TYPE_VDPA, None, 'netN', None, None, None),
+ (model.VNIC_TYPE_REMOTE_MANAGED, None, 'netN', None, None, None),
]
# _get_physnet_tunneled_info should be called for every NetworkRequest
# (so seven times)
mock_get_physnet_tunneled_info.side_effect = [
('physnet1', False), ('physnet1', False), ('', True),
('physnet1', False), ('physnet2', False), ('physnet3', False),
- ('physnet4', False), ('physnet1', False)
+ ('physnet4', False), ('physnet1', False), ('physnet1', False),
]
api = neutronapi.API()
@@ -6177,13 +6599,16 @@ class TestAPI(TestAPIBase):
mock.sentinel.request_group1,
mock.sentinel.request_group2],
port_resource_requests)
- self.assertEqual(6, len(pci_requests.requests))
+ self.assertEqual(7, len(pci_requests.requests))
has_pci_request_id = [net.pci_request_id is not None for net in
requested_networks.objects]
self.assertEqual(pci_requests.requests[3].spec[0]["dev_type"],
"type-PF")
self.assertEqual(pci_requests.requests[5].spec[0]["dev_type"], "vdpa")
- expected_results = [True, False, False, True, True, True, True, True]
+ self.assertEqual(pci_requests.requests[6].spec[0]["remote_managed"],
+ 'True')
+ expected_results = [True, False, False, True, True, True, True, True,
+ True]
self.assertEqual(expected_results, has_pci_request_id)
# Make sure only the trusted VF has the 'trusted' tag set in the spec.
for pci_req in pci_requests.requests:
@@ -6195,11 +6620,23 @@ class TestAPI(TestAPIBase):
else:
self.assertNotIn(pci_request.PCI_TRUSTED_TAG, spec)
+ # Only remote-managed ports must have the remote_managed tag set
+ # to True.
+ for pci_req in pci_requests.requests:
+ spec = pci_req.spec[0]
+ if pci_req.requester_id == uuids.portid_remote_managed:
+ self.assertEqual('True',
+ spec[pci_request.PCI_REMOTE_MANAGED_TAG])
+ else:
+ self.assertEqual('False',
+ spec[pci_request.PCI_REMOTE_MANAGED_TAG])
+
# Only SRIOV ports and those with a resource_request will have
# pci_req.requester_id.
self.assertEqual(
[uuids.portid_1, uuids.portid_3, uuids.portid_4, uuids.portid_5,
- uuids.trusted_port, uuids.portid_vdpa],
+ uuids.trusted_port, uuids.portid_vdpa,
+ uuids.portid_remote_managed],
[pci_req.requester_id for pci_req in pci_requests.requests])
self.assertCountEqual(
@@ -6671,7 +7108,7 @@ class TestAPI(TestAPIBase):
"""Tests that migrate_instance_start exits early if neutron doesn't
have the binding-extended API extension.
"""
- with mock.patch.object(self.api, 'supports_port_binding_extension',
+ with mock.patch.object(self.api, 'has_port_binding_extension',
return_value=False):
self.api.migrate_instance_start(
self.context, mock.sentinel.instance, {})
@@ -6691,8 +7128,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6716,8 +7154,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6743,8 +7182,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6767,8 +7207,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6945,13 +7386,17 @@ class TestAPI(TestAPIBase):
req_lvl_params.same_subtree,
)
- def test_get_segment_ids_for_network_no_segment_ext(self):
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_ids_for_network_no_segment_ext(self, mock_client):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=False
+ self.api, 'has_segment_extension', return_value=False,
):
self.assertEqual(
[], self.api.get_segment_ids_for_network(self.context,
uuids.network_id))
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi, 'get_client')
def test_get_segment_ids_for_network_passes(self, mock_client):
@@ -6960,26 +7405,44 @@ class TestAPI(TestAPIBase):
mock_client.return_value = mocked_client
mocked_client.list_subnets.return_value = subnets
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
res = self.api.get_segment_ids_for_network(
self.context, uuids.network_id)
self.assertEqual([uuids.segment_id], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
mocked_client.list_subnets.assert_called_once_with(
network_id=uuids.network_id, fields='segment_id')
@mock.patch.object(neutronapi, 'get_client')
- def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ def test_get_segment_ids_for_network_with_segments_none(self, mock_client):
subnets = {'subnets': [{'segment_id': None}]}
mocked_client = mock.create_autospec(client.Client)
mock_client.return_value = mocked_client
mocked_client.list_subnets.return_value = subnets
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
+ ):
+ res = self.api.get_segment_ids_for_network(
+ self.context, uuids.network_id)
+ self.assertEqual([], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
+ mocked_client.list_subnets.assert_called_once_with(
+ network_id=uuids.network_id, fields='segment_id')
+
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ subnets = {'subnets': [{}]}
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
+ mocked_client.list_subnets.return_value = subnets
+ with mock.patch.object(
+ self.api, 'has_segment_extension', return_value=True,
):
res = self.api.get_segment_ids_for_network(
self.context, uuids.network_id)
self.assertEqual([], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
mocked_client.list_subnets.assert_called_once_with(
network_id=uuids.network_id, fields='segment_id')
@@ -6990,19 +7453,24 @@ class TestAPI(TestAPIBase):
mocked_client.list_subnets.side_effect = (
exceptions.NeutronClientException(status_code=404))
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
self.assertRaises(exception.InvalidRoutedNetworkConfiguration,
self.api.get_segment_ids_for_network,
self.context, uuids.network_id)
+ mock_client.assert_called_once_with(self.context, admin=True)
- def test_get_segment_id_for_subnet_no_segment_ext(self):
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_id_for_subnet_no_segment_ext(self, mock_client):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=False
+ self.api, 'has_segment_extension', return_value=False,
):
self.assertIsNone(
self.api.get_segment_id_for_subnet(self.context,
uuids.subnet_id))
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi, 'get_client')
def test_get_segment_id_for_subnet_passes(self, mock_client):
@@ -7011,11 +7479,12 @@ class TestAPI(TestAPIBase):
mock_client.return_value = mocked_client
mocked_client.show_subnet.return_value = subnet
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
res = self.api.get_segment_id_for_subnet(
self.context, uuids.subnet_id)
self.assertEqual(uuids.segment_id, res)
+ mock_client.assert_called_once_with(self.context, admin=True)
mocked_client.show_subnet.assert_called_once_with(uuids.subnet_id)
@mock.patch.object(neutronapi, 'get_client')
@@ -7025,11 +7494,12 @@ class TestAPI(TestAPIBase):
mock_client.return_value = mocked_client
mocked_client.show_subnet.return_value = subnet
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
self.assertIsNone(
self.api.get_segment_id_for_subnet(self.context,
uuids.subnet_id))
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi, 'get_client')
def test_get_segment_id_for_subnet_fails(self, mock_client):
@@ -7038,35 +7508,35 @@ class TestAPI(TestAPIBase):
mocked_client.show_subnet.side_effect = (
exceptions.NeutronClientException(status_code=404))
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
self.assertRaises(exception.InvalidRoutedNetworkConfiguration,
self.api.get_segment_id_for_subnet,
self.context, uuids.subnet_id)
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi.LOG, 'debug')
- def test_get_port_pci_slot(self, mock_debug):
+ def test_get_port_pci_dev(self, mock_debug):
fake_port = {'id': uuids.fake_port_id}
request = objects.InstancePCIRequest(requester_id=uuids.fake_port_id,
request_id=uuids.pci_request_id)
bad_request = objects.InstancePCIRequest(
requester_id=uuids.wrong_port_id)
- device = objects.PciDevice(request_id=uuids.pci_request_id,
- address='fake-pci-address')
+ device = objects.PciDevice(request_id=uuids.pci_request_id)
bad_device = objects.PciDevice(request_id=uuids.wrong_request_id)
# Test the happy path
instance = objects.Instance(
pci_requests=objects.InstancePCIRequests(requests=[request]),
pci_devices=objects.PciDeviceList(objects=[device]))
self.assertEqual(
- 'fake-pci-address',
- self.api._get_port_pci_slot(self.context, instance, fake_port))
+ device,
+ self.api._get_port_pci_dev(instance, fake_port))
# Test not finding the request
instance = objects.Instance(
pci_requests=objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(bad_request)]))
self.assertIsNone(
- self.api._get_port_pci_slot(self.context, instance, fake_port))
+ self.api._get_port_pci_dev(instance, fake_port))
mock_debug.assert_called_with('No PCI request found for port %s',
uuids.fake_port_id, instance=instance)
mock_debug.reset_mock()
@@ -7075,7 +7545,7 @@ class TestAPI(TestAPIBase):
pci_requests=objects.InstancePCIRequests(requests=[request]),
pci_devices=objects.PciDeviceList(objects=[bad_device]))
self.assertIsNone(
- self.api._get_port_pci_slot(self.context, instance, fake_port))
+ self.api._get_port_pci_dev(instance, fake_port))
mock_debug.assert_called_with('No PCI device found for request %s',
uuids.pci_request_id, instance=instance)
@@ -7246,9 +7716,9 @@ class TestInstanceHasExtendedResourceRequest(TestAPIBase):
self.addCleanup(patcher.stop)
self.mock_client = patcher.start().return_value
self.extension = {
- "extensions": [
+ 'extensions': [
{
- "name": constants.RESOURCE_REQUEST_GROUPS_EXTENSION,
+ 'alias': constants.RESOURCE_REQUEST_GROUPS,
}
]
}
@@ -7364,6 +7834,41 @@ class TestAPIModuleMethods(test.NoDBTestCase):
self.assertEqual(networks, [{'id': 1}, {'id': 2}, {'id': 3}])
+ @mock.patch('nova.network.neutron.LOG.info')
+ @mock.patch('nova.network.neutron.LOG.exception')
+ @mock.patch('nova.objects.instance_info_cache.InstanceInfoCache.save')
+ def test_update_instance_cache_with_nw_info_not_found(self, mock_save,
+ mock_log_exc,
+ mock_log_info):
+ """Tests that an attempt to update (save) the instance info cache will
+ not log a traceback but will reraise the exception for caller handling.
+ """
+ # Simulate the "<OriginalClass>_Remote" subclass type that
+ # oslo.messaging creates, which we'll be catching.
+ class InstanceNotFound_Remote(exception.InstanceNotFound):
+
+ def __init__(self, message=None, **kwargs):
+ super().__init__(message=message, **kwargs)
+
+ # Simulate a long exception message containing tracebacks because
+ # oslo.messaging appends them.
+ message = 'Instance was not found.\n'.ljust(255, '*')
+ mock_save.side_effect = InstanceNotFound_Remote(message=message,
+ instance_id=uuids.inst)
+ api = neutronapi.API()
+ ctxt = context.get_context()
+ instance = fake_instance.fake_instance_obj(ctxt, uuid=uuids.i)
+
+ self.assertRaises(
+ exception.InstanceNotFound,
+ neutronapi.update_instance_cache_with_nw_info, api, ctxt, instance,
+ nw_info=model.NetworkInfo())
+
+ # Verify we didn't log the exception at ERROR level.
+ mock_log_exc.assert_not_called()
+ # Verify the exception message was truncated before logging it.
+ self.assertLessEqual(len(mock_log_info.call_args.args[1]), 255)
+
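The assertions above amount to a small error-handling idiom: catch the not-found error, log a truncated message at INFO instead of a traceback at ERROR, and re-raise for the caller. A hedged sketch (the function name is hypothetical; the 255-character cap mirrors the test):

import logging

LOG = logging.getLogger(__name__)


def save_info_cache_quietly(save, instance_uuid):
    try:
        save()
    except LookupError as exc:  # stand-in for InstanceNotFound
        # oslo.messaging appends remote tracebacks to the message, so
        # only the leading part is worth logging.
        LOG.info('Instance %s could not be found: %s',
                 instance_uuid, str(exc)[:255])
        raise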
class TestAPIPortbinding(TestAPIBase):
@@ -7390,25 +7895,83 @@ class TestAPIPortbinding(TestAPIBase):
mock_get_client.assert_called_once_with(mock.ANY)
mocked_client.list_extensions.assert_called_once_with()
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ }))
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_populate_neutron_extension_values_binding_sriov(self,
- mock_get_instance_pci_devs,
- mock_get_pci_device_devspec):
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov(
+ self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
+ 'card_serial_number': None,
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
PciDevice = collections.namedtuple('PciDevice',
- ['vendor_id', 'product_id', 'address'])
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
mydev = PciDevice(**pci_dev)
profile = {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'physnet1',
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ }
+
+ mock_get_instance_pci_devs.return_value = [mydev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+
+ self.api._populate_neutron_binding_profile(
+ instance, pci_req_id, port_req_body, None)
+
+ self.assertEqual(profile,
+ port_req_body['port'][
+ constants.BINDING_PROFILE])
+
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ 'card_serial_number': 'MT2113X00000',
+ })
+ )
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov_card_serial(
+ self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
+ host_id = 'my_host_id'
+ instance = objects.Instance(host=host_id)
+ port_req_body = {'port': {}}
+ pci_req_id = 'my_req_id'
+ pci_dev = {'vendor_id': 'a2d6',
+ 'product_id': '15b3',
+ 'address': '0000:0a:00.1',
+ 'card_serial_number': 'MT2113X00000',
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
+ mydev = PciDevice(**pci_dev)
+ profile = {'pci_vendor_info': 'a2d6:15b3',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'physnet1',
+ # card_serial_number is a property of the object obtained
+ # from extra_info.
+ 'card_serial_number': 'MT2113X00000',
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
}
mock_get_instance_pci_devs.return_value = [mydev]
@@ -7460,13 +8023,19 @@ class TestAPIPortbinding(TestAPIBase):
profile,
port_req_body['port'][constants.BINDING_PROFILE])
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ })
+ )
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_populate_neutron_extension_values_binding_sriov_with_cap(self,
- mock_get_instance_pci_devs,
- mock_get_pci_device_devspec):
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov_with_cap(
+ self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {
constants.BINDING_PROFILE: {
'capabilities': ['switchdev']}}}
@@ -7474,20 +8043,26 @@ class TestAPIPortbinding(TestAPIBase):
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
+ 'card_serial_number': None,
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
PciDevice = collections.namedtuple('PciDevice',
- ['vendor_id', 'product_id', 'address'])
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
mydev = PciDevice(**pci_dev)
profile = {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'physnet1',
'capabilities': ['switchdev'],
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
}
mock_get_instance_pci_devs.return_value = [mydev]
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
mock_get_pci_device_devspec.return_value = devspec
+
self.api._populate_neutron_binding_profile(
instance, pci_req_id, port_req_body, None)
@@ -7496,11 +8071,145 @@ class TestAPIPortbinding(TestAPIBase):
constants.BINDING_PROFILE])
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov_pf(
+ self, mock_get_instance_pci_devs, mock_get_devspec
+ ):
+ host_id = 'my_host_id'
+ instance = objects.Instance(host=host_id)
+ port_req_body = {'port': {}}
+
+ pci_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:01:00',
+ parent_addr='0000:02:00',
+ vendor_id='8086',
+ product_id='154d',
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'}
+ )
+
+ expected_profile = {
+ 'pci_vendor_info': '8086:154d',
+ 'pci_slot': '0000:01:00',
+ 'physical_network': 'physnet1',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ }
+
+ mock_get_instance_pci_devs.return_value = [pci_dev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_devspec.return_value = devspec
+
+ self.api._populate_neutron_binding_profile(
+ instance, uuids.pci_req, port_req_body, None)
+
+ self.assertEqual(
+ expected_profile,
+ port_req_body['port'][constants.BINDING_PROFILE]
+ )
+
+ @mock.patch.object(
+ pci_utils, 'get_vf_num_by_pci_address',
+ new=mock.MagicMock(side_effect=(lambda vf_a: 1
+ if vf_a == '0000:0a:00.1' else None)))
+ @mock.patch.object(
+ pci_utils, 'get_mac_by_pci_address',
+ new=mock.MagicMock(side_effect=(lambda vf_a: {
+ '0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
+ )
+ def test__get_vf_pci_device_profile(self):
+ pci_dev = {'vendor_id': 'a2d6',
+ 'product_id': '15b3',
+ 'address': '0000:0a:00.1',
+ 'parent_addr': '0000:0a:00.0',
+ 'card_serial_number': 'MT2113X00000',
+ 'sriov_cap': {
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ },
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'sriov_cap',
+ 'dev_type', 'parent_addr'])
+ mydev = PciDevice(**pci_dev)
+ self.assertEqual(self.api._get_vf_pci_device_profile(mydev),
+ {'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ 'card_serial_number': 'MT2113X00000'})
+
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.MagicMock(side_effect=(
+ lambda dev: {'0000:0a:00.1': {
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ 'card_serial_number': 'MT2113X00000',
+ }}.get(dev.address)
+ )))
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ def test__get_pci_device_profile_vf(self, mock_get_pci_device_devspec):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+
+ pci_dev = {'vendor_id': 'a2d6',
+ 'product_id': '15b3',
+ 'address': '0000:0a:00.1',
+ 'card_serial_number': 'MT2113X00000',
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
+ mydev = PciDevice(**pci_dev)
+
+ self.assertEqual({'card_serial_number': 'MT2113X00000',
+ 'pci_slot': '0000:0a:00.1',
+ 'pci_vendor_info': 'a2d6:15b3',
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'physical_network': 'physnet1',
+ 'vf_num': 1},
+ self.api._get_pci_device_profile(mydev))
+
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ def test__get_pci_device_profile_pf(self, mock_get_pci_device_devspec):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+
+ pci_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:0a:00.0',
+ parent_addr='0000:02:00',
+ vendor_id='a2d6',
+ product_id='15b3',
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={
+ 'capabilities': jsonutils.dumps(
+ {'card_serial_number': 'MT2113X00000'}),
+ 'mac_address': 'b4:96:91:34:f4:36',
+ },
+
+ )
+ self.assertEqual(
+ {
+ 'pci_slot': '0000:0a:00.0',
+ 'pci_vendor_info': 'a2d6:15b3',
+ 'physical_network': 'physnet1',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ },
+ self.api._get_pci_device_profile(pci_dev),
+ )
+
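Read side by side, the VF and PF tests describe how the binding profile is assembled: both carry pci_vendor_info, pci_slot and physical_network; a VF adds its parent PF's MAC address, its VF index and, when VPD exposes one, a card serial number; a PF instead reports its own MAC as device_mac_address. A stand-in sketch of that composition (attribute names follow the fakes used above, not Nova's PciDevice):

def pci_device_profile(dev, physical_network, get_pf_mac, get_vf_num):
    profile = {
        'pci_vendor_info': '%s:%s' % (dev.vendor_id, dev.product_id),
        'pci_slot': dev.address,
        'physical_network': physical_network,
    }
    if dev.dev_type == 'type-VF':
        profile['pf_mac_address'] = get_pf_mac(dev.parent_addr)
        profile['vf_num'] = get_vf_num(dev.address)
        if dev.card_serial_number:
            profile['card_serial_number'] = dev.card_serial_number
    elif dev.dev_type == 'type-PF':
        # assumed: the PF's MAC is available directly on the fake device
        profile['device_mac_address'] = dev.mac_address
    return profile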
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_fail(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_objs = [objects.PciDevice(vendor_id='1377',
@@ -7517,7 +8226,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value=[])
+ @mock.patch('nova.objects.Instance.get_pci_devices', return_value=[])
def test_populate_neutron_binding_profile_pci_dev_not_found(
self, mock_get_instance_pci_devs):
api = neutronapi.API()
@@ -7528,28 +8237,52 @@ class TestAPIPortbinding(TestAPIBase):
api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
mock_get_instance_pci_devs.assert_called_once_with(
- instance, pci_req_id)
+ request_id=pci_req_id)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_pci_parse_whitelist_called_once(self,
- mock_get_instance_pci_devs):
- white_list = [
- '{"address":"0000:0a:00.1","physical_network":"default"}']
- cfg.CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ @mock.patch.object(
+ pci_utils, 'is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch.object(
+ pci_utils, 'get_vf_num_by_pci_address',
+ new=mock.MagicMock(
+ side_effect=(lambda vf_a: {'0000:0a:00.1': 1}.get(vf_a)))
+ )
+ @mock.patch.object(
+ pci_utils, 'get_mac_by_pci_address',
+ new=mock.MagicMock(side_effect=(lambda vf_a: {
+ '0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
+ )
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_pci_parse_whitelist_called_once(
+ self, mock_get_instance_pci_devs
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:0a:00.1",
+ "physical_network": "default",
+ }
+ )
+ ]
+ cfg.CONF.set_override(
+ 'device_spec', device_spec, 'pci')
# NOTE(takashin): neutronapi.API must be initialized
- # after the 'passthrough_whitelist' is set in this test case.
+ # after the 'device_spec' is set in this test case.
api = neutronapi.API()
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
pci_req_id = 'my_req_id'
port_req_body = {'port': {}}
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
+ 'parent_addr': '0000:0a:00.0',
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
- whitelist = pci_whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ whitelist = pci_whitelist.Whitelist(CONF.pci.device_spec)
with mock.patch.object(pci_whitelist.Whitelist,
'_parse_white_list_from_config',
wraps=whitelist._parse_white_list_from_config
@@ -7575,7 +8308,7 @@ class TestAPIPortbinding(TestAPIBase):
vf.update_device(pci_dev)
return instance, pf, vf
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_pf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -7589,7 +8322,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 0, req)
self.assertEqual(expected_port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -7601,7 +8334,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf_fail(self,
mock_get_mac_by_pci_address,
@@ -7616,7 +8349,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch('nova.network.neutron.LOG.error')
def test_populate_pci_mac_address_no_device(self, mock_log_error,
mock_get_instance_pci_devs):
@@ -7774,7 +8507,7 @@ class TestAPIPortbinding(TestAPIBase):
self.assertEqual(1, mocked_client.create_port_binding.call_count)
self.assertDictEqual({uuids.port: binding['binding']}, result)
- # assert that that if vnic_type and profile are set in VIF object
+ # assert that if vnic_type and profile are set in VIF object
# the provided vnic_type and profile take precedence.
nwinfo = model.NetworkInfo([model.VIF(id=uuids.port,
@@ -7852,6 +8585,9 @@ class TestAPIPortbinding(TestAPIBase):
self.api.delete_port_binding(self.context, port_id,
'fake-host')
+ @mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.accelerator.cyborg._CyborgClient.delete_arqs_by_uuid')
@mock.patch('nova.network.neutron.get_binding_profile')
@mock.patch('nova.network.neutron.API._show_port')
@@ -8241,7 +8977,7 @@ class TestAllocateForInstance(test.NoDBTestCase):
requested_ports_dict = {uuids.port1: {}, uuids.port2: {}}
mock_neutron.list_extensions.return_value = {"extensions": [
- {"name": "asdf"}]}
+ {"alias": "asdf"}]}
port1 = {"port": {"id": uuids.port1, "mac_address": "mac1r"}}
port2 = {"port": {"id": uuids.port2, "mac_address": "mac2r"}}
mock_admin.update_port.side_effect = [port1, port2]
@@ -8324,6 +9060,10 @@ class TestAPINeutronHostnameDNSPortbinding(TestAPIBase):
requested_networks=requested_networks)
@mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False)
)
@@ -8336,8 +9076,8 @@ class TestAPINeutronHostnameDNSPortbinding(TestAPIBase):
11, dns_extension=True, bind_host_id=self.instance.get('host'))
@mock.patch(
- "nova.network.neutron.API._has_dns_extension",
- new=mock.Mock(return_value=True)
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True),
)
def test_allocate_for_instance_with_requested_port_with_dns_domain(self):
# The port's dns_name attribute should be set by the port update
diff --git a/nova/tests/unit/network/test_os_vif_util.py b/nova/tests/unit/network/test_os_vif_util.py
index e15e4eb92a..338492aef0 100644
--- a/nova/tests/unit/network/test_os_vif_util.py
+++ b/nova/tests/unit/network/test_os_vif_util.py
@@ -696,6 +696,39 @@ class OSVIFUtilTestCase(test.NoDBTestCase):
self.assertObjEqual(expect, actual)
+ def test_nova_to_osvif_ovs_with_vnic_remote_managed(self):
+ vif = model.VIF(
+ id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+ type=model.VIF_TYPE_OVS,
+ address="22:52:25:62:e2:aa",
+ vnic_type=model.VNIC_TYPE_REMOTE_MANAGED,
+ network=model.Network(
+ id="b82c1929-051e-481d-8110-4669916c7915",
+ label="Demo Net",
+ subnets=[]),
+ profile={'pci_slot': '0000:0a:00.1'}
+ )
+
+ actual = os_vif_util.nova_to_osvif_vif(vif)
+
+ expect = osv_objects.vif.VIFHostDevice(
+ id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+ active=False,
+ address="22:52:25:62:e2:aa",
+ dev_address='0000:0a:00.1',
+ dev_type=os_vif_fields.VIFHostDeviceDevType.ETHERNET,
+ plugin="noop",
+ has_traffic_filtering=False,
+ preserve_on_delete=False,
+ network=osv_objects.network.Network(
+ id="b82c1929-051e-481d-8110-4669916c7915",
+ bridge_interface=None,
+ label="Demo Net",
+ subnets=osv_objects.subnet.SubnetList(
+ objects=[])))
+
+ self.assertObjEqual(expect, actual)
+
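The expected object spells out the mapping for VNIC_TYPE_REMOTE_MANAGED: the OVS VIF is surfaced to os-vif as a PCI host device whose dev_address comes from the pci_slot in the port's binding profile, handled by the noop plugin. A rough stand-in:

def remote_managed_vif_fields(vif):
    # Sketch only; returns the fields the conversion is expected to set.
    return {
        'id': vif['id'],
        'address': vif['address'],
        'dev_address': vif['profile']['pci_slot'],
        'dev_type': 'ethernet',
        'plugin': 'noop',
    }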
def test_nova_to_osvif_ovs_with_vnic_vdpa(self):
vif = model.VIF(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
diff --git a/nova/tests/unit/network/test_security_group.py b/nova/tests/unit/network/test_security_group.py
index b0bde1d9a2..a76dd4bf3c 100644
--- a/nova/tests/unit/network/test_security_group.py
+++ b/nova/tests/unit/network/test_security_group.py
@@ -13,10 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-import mock
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.v2_0 import client
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
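This import churn, repeated across the files below, is purely mechanical: on Python 3 the standard library's unittest.mock is API-compatible with the third-party mock package, so only the import line changes. For instance:

import os
from unittest import mock  # was: import mock

# patch/Mock/MagicMock/sentinel behave the same as in the external
# package; this holds with either import.
with mock.patch.object(os, 'getcwd', return_value='/tmp'):
    assert os.getcwd() == '/tmp'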
diff --git a/nova/tests/unit/notifications/objects/test_flavor.py b/nova/tests/unit/notifications/objects/test_flavor.py
index 41fc8a36c3..e3cb9ec4c3 100644
--- a/nova/tests/unit/notifications/objects/test_flavor.py
+++ b/nova/tests/unit/notifications/objects/test_flavor.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from nova import context
from nova.notifications.objects import flavor as flavor_notification
diff --git a/nova/tests/unit/notifications/objects/test_instance.py b/nova/tests/unit/notifications/objects/test_instance.py
index c2b7315587..8735e972dc 100644
--- a/nova/tests/unit/notifications/objects/test_instance.py
+++ b/nova/tests/unit/notifications/objects/test_instance.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/notifications/objects/test_notification.py b/nova/tests/unit/notifications/objects/test_notification.py
index 38d82d9ae9..de9e6f2762 100644
--- a/nova/tests/unit/notifications/objects/test_notification.py
+++ b/nova/tests/unit/notifications/objects/test_notification.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_versionedobjects import fixture
@@ -386,7 +386,7 @@ notification_object_data = {
# ImageMetaProps, so when you see a fail here for that reason, you must
# *also* bump the version of ImageMetaPropsPayload. See its docstring for
# more information.
- 'ImageMetaPropsPayload': '1.8-080bdcba9b96122eab57bf39d47348f7',
+ 'ImageMetaPropsPayload': '1.12-b9c64832d7772c1973e913bacbe0e8f9',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.8-4fa3da9cbf0761f1f700ae578f36dc2f',
'InstanceActionRebuildNotification':
diff --git a/nova/tests/unit/notifications/objects/test_service.py b/nova/tests/unit/notifications/objects/test_service.py
index 6f0f5c7f7a..297dcac56f 100644
--- a/nova/tests/unit/notifications/objects/test_service.py
+++ b/nova/tests/unit/notifications/objects/test_service.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_utils import timeutils
from nova import context
diff --git a/nova/tests/unit/notifications/test_base.py b/nova/tests/unit/notifications/test_base.py
index 3ee2e36ddc..c0468ec64d 100644
--- a/nova/tests/unit/notifications/test_base.py
+++ b/nova/tests/unit/notifications/test_base.py
@@ -13,9 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context as nova_context
diff --git a/nova/tests/unit/objects/test_aggregate.py b/nova/tests/unit/objects/test_aggregate.py
index bdb14f72ad..3f01c9613d 100644
--- a/nova/tests/unit/objects/test_aggregate.py
+++ b/nova/tests/unit/objects/test_aggregate.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_block_device.py b/nova/tests/unit/objects/test_block_device.py
index 80c9e9a1fa..85959a961a 100644
--- a/nova/tests/unit/objects/test_block_device.py
+++ b/nova/tests/unit/objects/test_block_device.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
@@ -250,6 +251,14 @@ class _TestBlockDeviceMappingObject(object):
destination_type='local')
self.assertFalse(bdm.is_volume)
+ def test_is_local(self):
+ self.assertTrue(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='local').is_local)
+ self.assertFalse(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='volume').is_local)
+
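The property being added presumably reduces to a one-line check on destination_type, along the lines of this sketch:

class BlockDeviceMappingSketch:
    # Hypothetical stand-in for the real versioned object.
    def __init__(self, destination_type):
        self.destination_type = destination_type

    @property
    def is_local(self):
        # 'local' means the disk lives on the host, not on a volume
        return self.destination_type == 'local'


assert BlockDeviceMappingSketch('local').is_local
assert not BlockDeviceMappingSketch('volume').is_local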
def test_obj_load_attr_not_instance(self):
"""Tests that lazy-loading something other than the instance field
results in an error.
@@ -275,6 +284,11 @@ class _TestBlockDeviceMappingObject(object):
mock_inst_get_by_uuid.assert_called_once_with(
self.context, bdm.instance_uuid)
+ def test_obj_load_attr_encrypted(self):
+ bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
+ del bdm.encrypted
+ self.assertEqual(bdm.fields['encrypted'].default, bdm.encrypted)
+
def test_obj_make_compatible_pre_1_17(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
diff --git a/nova/tests/unit/objects/test_build_request.py b/nova/tests/unit/objects/test_build_request.py
index 2b60888c5d..a55ab34008 100644
--- a/nova/tests/unit/objects/test_build_request.py
+++ b/nova/tests/unit/objects/test_build_request.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as o_vo_base
diff --git a/nova/tests/unit/objects/test_cell_mapping.py b/nova/tests/unit/objects/test_cell_mapping.py
index 3182269cc5..936793294b 100644
--- a/nova/tests/unit/objects/test_cell_mapping.py
+++ b/nova/tests/unit/objects/test_cell_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 297edfbd55..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -13,9 +13,10 @@
# under the License.
import copy
+from unittest import mock
-import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
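The new test expects create() to translate the database-level duplicate error into Nova's own exception type. A generic sketch of that translation, with stand-ins for oslo.db's DBDuplicateEntry and Nova's DuplicateRecord:

class DuplicateEntry(Exception):    # stand-in for db_exc.DBDuplicateEntry
    pass


class DuplicateRecord(Exception):   # stand-in for exception.DuplicateRecord
    pass


def create_compute_node(db_create, values):
    try:
        return db_create(values)
    except DuplicateEntry as exc:
        raise DuplicateRecord('compute node already exists') from exc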
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
@@ -553,17 +562,15 @@ class _TestComputeNodeObject(object):
def test_update_from_virt_driver_uuid_already_set(self):
"""Tests update_from_virt_driver where the compute node object already
- has a uuid value so the uuid from the virt driver is ignored.
+ has a uuid value so an error is raised.
"""
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
# Emulate the ironic driver which adds a uuid field.
resources['uuid'] = uuidsentinel.node_uuid
compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
- compute.update_from_virt_driver(resources)
- expected = fake_compute_with_resources.obj_clone()
- expected.uuid = uuidsentinel.something_else
- self.assertTrue(base.obj_equal_prims(expected, compute))
+ self.assertRaises(exception.InvalidNodeConfiguration,
+ compute.update_from_virt_driver, resources)
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
@@ -666,8 +673,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -694,8 +701,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -722,8 +729,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
def test_get_all_by_not_mapped(self):
diff --git a/nova/tests/unit/objects/test_console_auth_token.py b/nova/tests/unit/objects/test_console_auth_token.py
index 9c92e798b0..9a0901e12a 100644
--- a/nova/tests/unit/objects/test_console_auth_token.py
+++ b/nova/tests/unit/objects/test_console_auth_token.py
@@ -14,7 +14,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
import urllib.parse as urlparse
from oslo_db.exception import DBDuplicateEntry
diff --git a/nova/tests/unit/objects/test_ec2.py b/nova/tests/unit/objects/test_ec2.py
index 8261fd6173..55230a7599 100644
--- a/nova/tests/unit/objects/test_ec2.py
+++ b/nova/tests/unit/objects/test_ec2.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_external_event.py b/nova/tests/unit/objects/test_external_event.py
index 915358ba59..58c45c2549 100644
--- a/nova/tests/unit/objects/test_external_event.py
+++ b/nova/tests/unit/objects/test_external_event.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import external_event as external_event_obj
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
index 39f9de8cfe..461dc0ff6f 100644
--- a/nova/tests/unit/objects/test_fields.py
+++ b/nova/tests/unit/objects/test_fields.py
@@ -15,9 +15,9 @@
import collections
import datetime
import os
+from unittest import mock
import iso8601
-import mock
from oslo_serialization import jsonutils
from oslo_versionedobjects import exception as ovo_exc
@@ -551,7 +551,7 @@ class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
- self.field = fields.Field(fields.NetworkModel())
+ self.field = fields.NetworkModelField()
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
@@ -570,7 +570,7 @@ class TestNetworkVIFModel(TestField):
super(TestNetworkVIFModel, self).setUp()
model = network_model.VIF('6c197bc7-820c-40d5-8aff-7116b993e793')
primitive = jsonutils.dumps(model)
- self.field = fields.Field(fields.NetworkVIFModel())
+ self.field = fields.NetworkVIFModelField()
self.coerce_good_values = [(model, model), (primitive, model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, primitive)]
diff --git a/nova/tests/unit/objects/test_flavor.py b/nova/tests/unit/objects/test_flavor.py
index 93294d95aa..4172d3fda3 100644
--- a/nova/tests/unit/objects/test_flavor.py
+++ b/nova/tests/unit/objects/test_flavor.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/objects/test_host_mapping.py b/nova/tests/unit/objects/test_host_mapping.py
index 8917e318af..73eadb7047 100644
--- a/nova/tests/unit/objects/test_host_mapping.py
+++ b/nova/tests/unit/objects/test_host_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_image_meta.py b/nova/tests/unit/objects/test_image_meta.py
index 1750caba01..371f7b101a 100644
--- a/nova/tests/unit/objects/test_image_meta.py
+++ b/nova/tests/unit/objects/test_image_meta.py
@@ -108,6 +108,7 @@ class TestImageMetaProps(test.NoDBTestCase):
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
+ 'hw_locked_memory': 'true',
'trait:CUSTOM_TRUSTED': 'required',
# Fill sane values for the rest here
}
@@ -116,6 +117,7 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual('vga', virtprops.hw_video_model)
self.assertEqual(512, virtprops.hw_video_ram)
self.assertTrue(virtprops.hw_qemu_guest_agent)
+ self.assertTrue(virtprops.hw_locked_memory)
self.assertIsNotNone(virtprops.traits_required)
self.assertIn('CUSTOM_TRUSTED', virtprops.traits_required)
@@ -285,6 +287,28 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual([set([0, 1, 2, 3])],
virtprops.hw_numa_cpus)
+ def test_locked_memory_prop(self):
+ props = {'hw_locked_memory': 'true'}
+ virtprops = objects.ImageMetaProps.from_dict(props)
+ self.assertTrue(virtprops.hw_locked_memory)
+
+ def test_obj_make_compatible_hw_locked_memory(self):
+ """Check 'hw_locked_memory' compatibility."""
+ # assert that 'hw_locked_memory' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_locked_memory='true',
+ )
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertIn('hw_locked_memory',
+ primitive['nova_object.data'])
+ self.assertTrue(primitive['nova_object.data']['hw_locked_memory'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.32')
+ self.assertNotIn('hw_locked_memory',
+ primitive['nova_object.data'])
+
def test_get_unnumbered_trait_fields(self):
"""Tests that only valid un-numbered required traits are parsed from
the properties.
@@ -349,6 +373,53 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.0')
+ def test_obj_make_compatible_hw_ephemeral_encryption(self):
+ """Check 'hw_ephemeral_encryption(_format)' compatibility."""
+ # assert that 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' is supported
+ # on a suitably new version
+ new_fields = (
+ 'hw_ephemeral_encryption',
+ 'hw_ephemeral_encryption_format'
+ )
+ eph_format = objects.fields.BlockDeviceEncryptionFormatType.LUKS
+ obj = objects.ImageMetaProps(
+ hw_ephemeral_encryption='yes',
+ hw_ephemeral_encryption_format=eph_format,
+ )
+ primitive = obj.obj_to_primitive('1.32')
+ for field in new_fields:
+ self.assertIn(field, primitive['nova_object.data'])
+ self.assertTrue(
+ primitive['nova_object.data']['hw_ephemeral_encryption'])
+ self.assertEqual(
+ eph_format,
+ primitive['nova_object.data']['hw_ephemeral_encryption_format'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.31')
+ for field in new_fields:
+ self.assertNotIn(field, primitive['nova_object.data'])
+
+ def test_obj_make_compatible_hw_emulation(self):
+ """Check 'hw_emulation_architecture' compatibility."""
+ # assert that 'hw_emulation_architecture' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_emulation_architecture=objects.fields.Architecture.AARCH64,
+ )
+ primitive = obj.obj_to_primitive('1.31')
+ self.assertIn('hw_emulation_architecture',
+ primitive['nova_object.data'])
+ self.assertEqual(
+ objects.fields.Architecture.AARCH64,
+ primitive['nova_object.data']['hw_emulation_architecture'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.29')
+ self.assertNotIn('hw_emulation_architecture',
+ primitive['nova_object.data'])
+
def test_obj_make_compatible_input_bus(self):
"""Check 'hw_input_bus' compatibility."""
# assert that 'hw_input_bus' is supported on a suitably new version
@@ -467,3 +538,19 @@ class TestImageMetaProps(test.NoDBTestCase):
hw_pci_numa_affinity_policy=fields.PCINUMAAffinityPolicy.SOCKET)
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.27')
+
+ def test_obj_make_compatible_viommu_model(self):
+ """Check 'hw_viommu_model' compatibility."""
+ # assert that 'hw_viommu_model' is supported on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_viommu_model=objects.fields.VIOMMUModel.VIRTIO,
+ )
+ primitive = obj.obj_to_primitive('1.34')
+ self.assertIn('hw_viommu_model', primitive['nova_object.data'])
+ self.assertEqual(
+ objects.fields.VIOMMUModel.VIRTIO,
+ primitive['nova_object.data']['hw_viommu_model'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertNotIn('hw_viommu_model', primitive['nova_object.data'])
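All of these obj_make_compatible tests share one shape: serialize at a target version and check that fields newer than that version are stripped. A toy model of the mechanism (the field-to-version table is illustrative, not ImageMetaProps' real registry):

FIELD_ADDED_IN = {
    'hw_viommu_model': (1, 34),
    'hw_locked_memory': (1, 33),
    'hw_ephemeral_encryption': (1, 32),
    'hw_ephemeral_encryption_format': (1, 32),
    'hw_emulation_architecture': (1, 31),
}


def to_primitive(data, target_version):
    version = tuple(int(p) for p in target_version.split('.'))
    return {k: v for k, v in data.items()
            if FIELD_ADDED_IN.get(k, (1, 0)) <= version}


assert 'hw_viommu_model' in to_primitive({'hw_viommu_model': 'virtio'}, '1.34')
assert 'hw_viommu_model' not in to_primitive(
    {'hw_viommu_model': 'virtio'}, '1.33')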
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index e187a4c251..6215d2be60 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -14,8 +14,8 @@
import collections
import datetime
+from unittest import mock
-import mock
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
@@ -25,6 +25,7 @@ from oslo_versionedobjects import base as ovo_base
from nova.compute import task_states
from nova.compute import vm_states
+from nova import context
from nova.db.main import api as db
from nova.db.main import models as sql_models
from nova import exception
@@ -2015,12 +2016,14 @@ class TestInstanceListObject(test_objects._LocalTest,
# manually here.
engine = db.get_engine()
table = sql_models.Instance.__table__
- with engine.connect() as conn:
- update = table.insert().values(user_id=self.context.user_id,
- project_id=self.context.project_id,
- uuid=uuids.nullinst,
- host='foo',
- hidden=None)
+ with engine.connect() as conn, conn.begin():
+ update = table.insert().values(
+ user_id=self.context.user_id,
+ project_id=self.context.project_id,
+ uuid=uuids.nullinst,
+ host='foo',
+ hidden=None,
+ )
conn.execute(update)
insts = objects.InstanceList.get_by_filters(self.context,
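The rewritten block reflects SQLAlchemy 2.x's explicit-transaction rule: executing on a Connection no longer autocommits, so the test opens the connection and a transaction in one with statement. A runnable sketch against an in-memory database:

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
metadata = sa.MetaData()
table = sa.Table('t', metadata, sa.Column('id', sa.Integer, primary_key=True))
metadata.create_all(engine)

# conn.begin() commits when the block exits; without an explicit
# transaction, 2.x-style engines roll the INSERT back on close.
with engine.connect() as conn, conn.begin():
    conn.execute(table.insert().values(id=1))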
@@ -2071,3 +2074,164 @@ class TestInstanceObjectMisc(test.NoDBTestCase):
self.assertEqual(['metadata', 'system_metadata', 'info_cache',
'security_groups', 'pci_devices', 'tags', 'extra',
'extra.flavor'], result_list)
+
+
+class TestInstanceObjectGetPciDevices(test.NoDBTestCase):
+ def test_lazy_loading_pci_devices(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ inst = instance.Instance(ctxt, uuid=uuids.instance)
+ with mock.patch(
+ "nova.objects.PciDeviceList.get_by_instance_uuid",
+ return_value=objects.PciDeviceList(),
+ ) as mock_get_pci:
+ self.assertEqual([], inst.get_pci_devices())
+
+ mock_get_pci.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_lazy_loading_pci_requests(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ devs = [objects.PciDevice(request_id=uuids.req1)]
+ inst = instance.Instance(
+ ctxt,
+ uuid=uuids.instance,
+ pci_devices=objects.PciDeviceList(
+ objects=devs
+ ),
+ )
+
+ with mock.patch(
+ "nova.objects.InstancePCIRequests.get_by_instance_uuid",
+ return_value=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ ) as mock_get_pci_req:
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ mock_get_pci_req.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_no_filter(self):
+ devs = [objects.PciDevice()]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs)
+ )
+
+ self.assertEqual(devs, inst.get_pci_devices())
+
+ def test_filter_by_request_id(self):
+ expected_devs = [objects.PciDevice(request_id=uuids.req1)]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs)
+ )
+
+ self.assertEqual(
+ expected_devs, inst.get_pci_devices(request_id=uuids.req1)
+ )
+
+ def test_filter_by_source(self):
+ expected_devs = [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ def test_filter_by_request_id_and_source(self):
+ expected_devs = []
+ all_devs = expected_devs + [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req2),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ request_id=uuids.req1,
+ source=objects.InstancePCIRequest.NEUTRON_PORT,
+ ),
+ )
+
+ def test_old_pci_dev_and_req(self):
+ """This tests the case when the system has old InstancePCIRequest
+ objects without the request_id being filled. And therefore have
+ PciDevice object where the request_id is None too. These requests and
+ devices are always flavor based.
+ """
+ devs = [
+ objects.PciDevice(request_id=None),
+ objects.PciDevice(request_id=None),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=None,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS,
+ ),
+ )
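The class above exercises a two-axis filter: by the PCI request that claimed a device, and by whether that request originated in a flavor alias or a Neutron port (an unset request_id marks legacy, flavor-based pairs). A self-contained approximation with stand-in types rather than Nova's versioned objects:

from dataclasses import dataclass
from typing import Optional

FLAVOR_ALIAS, NEUTRON_PORT = 'flavor-alias', 'neutron-port'


@dataclass
class Request:
    request_id: Optional[str]
    alias_name: Optional[str] = None  # set only for flavor-based requests


@dataclass
class Device:
    request_id: Optional[str]


def get_pci_devices(devices, requests, request_id=None, source=None):
    if request_id is not None:
        devices = [d for d in devices if d.request_id == request_id]
    if source is not None:
        by_id = {r.request_id: r for r in requests}
        flavor_based = source == FLAVOR_ALIAS
        devices = [d for d in devices
                   if (by_id[d.request_id].alias_name is not None)
                   == flavor_based]
    return devices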
diff --git a/nova/tests/unit/objects/test_instance_action.py b/nova/tests/unit/objects/test_instance_action.py
index 1743623b1c..8322102021 100644
--- a/nova/tests/unit/objects/test_instance_action.py
+++ b/nova/tests/unit/objects/test_instance_action.py
@@ -14,8 +14,8 @@
import copy
import traceback
+from unittest import mock
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_instance_device_metadata.py b/nova/tests/unit/objects/test_instance_device_metadata.py
index 6f998db84e..c04d02dcb7 100644
--- a/nova/tests/unit/objects/test_instance_device_metadata.py
+++ b/nova/tests/unit/objects/test_instance_device_metadata.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from nova import objects
diff --git a/nova/tests/unit/objects/test_instance_fault.py b/nova/tests/unit/objects/test_instance_fault.py
index b19d8663c1..1816801fca 100644
--- a/nova/tests/unit/objects/test_instance_fault.py
+++ b/nova/tests/unit/objects/test_instance_fault.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
index 41efd08a36..5ea566fea7 100644
--- a/nova/tests/unit/objects/test_instance_group.py
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -317,7 +317,7 @@ class _TestInstanceGroupObject(object):
obj_primitive = obj.obj_to_primitive()
self.assertIn('policy', data(obj_primitive))
self.assertIn('policies', data(obj_primitive))
- # Before 1.10, only has polices which is the list of policy name
+ # Before 1.10, only has policies which is the list of policy name
obj_primitive = obj.obj_to_primitive('1.10')
self.assertNotIn('policy', data(obj_primitive))
self.assertIn('policies', data(obj_primitive))
diff --git a/nova/tests/unit/objects/test_instance_info_cache.py b/nova/tests/unit/objects/test_instance_info_cache.py
index 2df596f5af..2c4d6a3263 100644
--- a/nova/tests/unit/objects/test_instance_info_cache.py
+++ b/nova/tests/unit/objects/test_instance_info_cache.py
@@ -13,8 +13,9 @@
# under the License.
import datetime
+from unittest import mock
-import mock
+from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -82,6 +83,30 @@ class _TestInstanceInfoCacheObject(object):
self.assertEqual(timeutils.normalize_time(fake_updated_at),
timeutils.normalize_time(obj.updated_at))
+ @mock.patch.object(db, 'instance_info_cache_update')
+ def test_save_fkey_constraint_fail(self, mock_update):
+ fake_updated_at = datetime.datetime(2015, 1, 1)
+ nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ nwinfo_json = nwinfo.json()
+ new_info_cache = fake_info_cache.copy()
+ new_info_cache['id'] = 1
+ new_info_cache['updated_at'] = fake_updated_at
+ new_info_cache['network_info'] = nwinfo_json
+
+ # We should see InstanceNotFound raised for fkey=instance_uuid
+ mock_update.side_effect = db_exc.DBReferenceError(
+ 'table', 'constraint', 'instance_uuid', 'key_table')
+
+ obj = instance_info_cache.InstanceInfoCache(context=self.context)
+ obj.instance_uuid = uuids.info_instance
+ obj.network_info = nwinfo_json
+ self.assertRaises(exception.InstanceNotFound, obj.save)
+
+ # We should see the original exception raised for any other fkey
+ mock_update.side_effect = db_exc.DBReferenceError(
+ 'table', 'constraint', 'otherkey', 'key_table')
+ self.assertRaises(db_exc.DBReferenceError, obj.save)
+
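The test encodes a selective translation: a foreign-key violation on instance_uuid means the instance row is gone and becomes InstanceNotFound, while a violation on any other key must propagate untouched. A sketch with stand-in exception types:

class FKeyViolation(Exception):     # stand-in for db_exc.DBReferenceError
    def __init__(self, key):
        self.key = key


class InstanceNotFound(Exception):  # stand-in for Nova's exception
    pass


def save_cache(db_update, instance_uuid, values):
    try:
        db_update(instance_uuid, values)
    except FKeyViolation as exc:
        if exc.key == 'instance_uuid':
            raise InstanceNotFound(instance_uuid) from exc
        raise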
@mock.patch.object(db, 'instance_info_cache_get',
return_value=fake_info_cache)
def test_refresh(self, mock_get):
diff --git a/nova/tests/unit/objects/test_instance_mapping.py b/nova/tests/unit/objects/test_instance_mapping.py
index 2c877c0a1f..865f5b6581 100644
--- a/nova/tests/unit/objects/test_instance_mapping.py
+++ b/nova/tests/unit/objects/test_instance_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as orm_exc
diff --git a/nova/tests/unit/objects/test_instance_numa.py b/nova/tests/unit/objects/test_instance_numa.py
index f7a9ef7a1d..0d3bd0dba0 100644
--- a/nova/tests/unit/objects/test_instance_numa.py
+++ b/nova/tests/unit/objects/test_instance_numa.py
@@ -11,7 +11,8 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
import testtools
diff --git a/nova/tests/unit/objects/test_instance_pci_requests.py b/nova/tests/unit/objects/test_instance_pci_requests.py
index 9b6003ca49..91b289dbd5 100644
--- a/nova/tests/unit/objects/test_instance_pci_requests.py
+++ b/nova/tests/unit/objects/test_instance_pci_requests.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
@@ -112,23 +113,6 @@ class _TestInstancePCIRequests(object):
self.assertIsNone(req.requests[0].requester_id)
self.assertEqual(uuids.requester_id, req.requests[1].requester_id)
- def test_from_request_spec_instance_props(self):
- requests = objects.InstancePCIRequests(
- requests=[objects.InstancePCIRequest(count=1,
- request_id=FAKE_UUID,
- spec=[{'vendor_id': '8086',
- 'device_id': '1502'}])
- ],
- instance_uuid=FAKE_UUID)
- result = jsonutils.to_primitive(requests)
- result = objects.InstancePCIRequests.from_request_spec_instance_props(
- result)
- self.assertEqual(1, len(result.requests))
- self.assertEqual(1, result.requests[0].count)
- self.assertEqual(FAKE_UUID, result.requests[0].request_id)
- self.assertEqual([{'vendor_id': '8086', 'device_id': '1502'}],
- result.requests[0].spec)
-
def test_obj_make_compatible_pre_1_2(self):
topo_obj = objects.InstancePCIRequest(
count=1,
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
index ad405b7e1b..b86bbb44de 100644
--- a/nova/tests/unit/objects/test_keypair.py
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
from nova import exception
diff --git a/nova/tests/unit/objects/test_migrate_data.py b/nova/tests/unit/objects/test_migrate_data.py
index bc04c5bd13..7ceaf2a192 100644
--- a/nova/tests/unit/objects/test_migrate_data.py
+++ b/nova/tests/unit/objects/test_migrate_data.py
@@ -94,8 +94,8 @@ class _TestLibvirtLiveMigrateData(object):
target_connect_addr='127.0.0.1',
dst_wants_file_backed_memory=False,
file_backed_memory_discard=False,
- src_supports_numa_live_migraton=True,
- dst_supports_numa_live_migraton=True,
+ src_supports_numa_live_migration=True,
+ dst_supports_numa_live_migration=True,
dst_numa_info=migrate_data.LibvirtLiveMigrateNUMAInfo())
manifest = ovo_base.obj_tree_get_versions(obj.obj_name())
@@ -219,67 +219,6 @@ class TestRemoteHyperVLiveMigrateData(test_objects._RemoteTest,
pass
-class _TestPowerVMLiveMigrateData(object):
- @staticmethod
- def _mk_obj():
- return migrate_data.PowerVMLiveMigrateData(
- host_mig_data=dict(one=2),
- dest_ip='1.2.3.4',
- dest_user_id='a_user',
- dest_sys_name='a_sys',
- public_key='a_key',
- dest_proc_compat='POWER7',
- vol_data=dict(three=4),
- vea_vlan_mappings=dict(five=6),
- old_vol_attachment_ids=dict(seven=8),
- wait_for_vif_plugged=True)
-
- @staticmethod
- def _mk_leg():
- return {
- 'host_mig_data': {'one': '2'},
- 'dest_ip': '1.2.3.4',
- 'dest_user_id': 'a_user',
- 'dest_sys_name': 'a_sys',
- 'public_key': 'a_key',
- 'dest_proc_compat': 'POWER7',
- 'vol_data': {'three': '4'},
- 'vea_vlan_mappings': {'five': '6'},
- 'old_vol_attachment_ids': {'seven': '8'},
- 'wait_for_vif_plugged': True
- }
-
- def test_migrate_data(self):
- obj = self._mk_obj()
- self.assertEqual('a_key', obj.public_key)
- obj.public_key = 'key2'
- self.assertEqual('key2', obj.public_key)
-
- def test_obj_make_compatible(self):
- obj = self._mk_obj()
-
- data = lambda x: x['nova_object.data']
-
- primitive = data(obj.obj_to_primitive())
- self.assertIn('vea_vlan_mappings', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.0'))
- self.assertNotIn('vea_vlan_mappings', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.1'))
- self.assertNotIn('old_vol_attachment_ids', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.2'))
- self.assertNotIn('wait_for_vif_plugged', primitive)
-
-
-class TestPowerVMLiveMigrateData(test_objects._LocalTest,
- _TestPowerVMLiveMigrateData):
- pass
-
-
-class TestRemotePowerVMLiveMigrateData(test_objects._RemoteTest,
- _TestPowerVMLiveMigrateData):
- pass
-
-
class TestVIFMigrateData(test.NoDBTestCase):
def test_get_dest_vif_source_vif_not_set(self):
diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py
index 970122a409..6da232b933 100644
--- a/nova/tests/unit/objects/test_migration.py
+++ b/nova/tests/unit/objects/test_migration.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_migration_context.py b/nova/tests/unit/objects/test_migration_context.py
index 94e8e9d57f..12becaee38 100644
--- a/nova/tests/unit/objects/test_migration_context.py
+++ b/nova/tests/unit/objects/test_migration_context.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 33be416167..aab079381c 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -19,9 +19,9 @@ import datetime
import inspect
import os
import pprint
+from unittest import mock
import fixtures
-import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
@@ -1046,7 +1046,7 @@ class TestRegistry(test.NoDBTestCase):
object_data = {
'Aggregate': '1.3-f315cb68906307ca2d1cca84d4753585',
'AggregateList': '1.3-3ea55a050354e72ef3306adefa553957',
- 'BlockDeviceMapping': '1.20-45a6ad666ddf14bbbedece2293af77e2',
+ 'BlockDeviceMapping': '1.21-220abb8aa1450e759b72fce8ec6ff955',
'BlockDeviceMappingList': '1.18-73bcbbae5ef5e8adcedbc821db869306',
'BuildRequest': '1.3-077dee42bed93f8a5b62be77657b7152',
'BuildRequestList': '1.0-cd95608eccb89fbc702c8b52f38ec738',
@@ -1066,20 +1066,20 @@ object_data = {
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'Flavor': '1.2-4ce99b41327bb230262e5a8f45ff0ce3',
'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
- 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HostMappingList': '1.1-18ac2bfb8c1eb5545bed856da58a79bc',
+ 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HyperVLiveMigrateData': '1.4-e265780e6acfa631476c8170e8d6fce0',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.30-5bfc3dd01bbfdbb28cb3a096c0b2f383',
+ 'ImageMetaProps': '1.34-29b3a6b7fe703f36bfd240d914f16c21',
'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759',
'InstanceDeviceMetadata': '1.0-74d78dd36aa32d26d2769a1b57caf186',
- 'InstanceExternalEvent': '1.4-06c2dfcf2d2813c24cd37ee728524f1a',
+ 'InstanceExternalEvent': '1.5-1ec57351a9851c1eb43ccd90662d6dd0',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
@@ -1097,27 +1097,27 @@ object_data = {
'LibvirtLiveMigrateBDMInfo': '1.1-5f4a68873560b6f834b74e7861d71aaf',
'LibvirtLiveMigrateData': '1.10-348cf70ea44d3b985f45f64725d6f6a7',
'LibvirtLiveMigrateNUMAInfo': '1.0-0e777677f3459d0ed1634eabbdb6c22f',
+ 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
'MemoryDiagnostics': '1.0-2c995ae0f2223bb0f8e523c5cc0b83da',
'Migration': '1.7-bd45b232fd7c95cd79ae9187e10ef582',
'MigrationContext': '1.2-89f10a83999f852a489962ae37d8a026',
'MigrationList': '1.5-36793f8d65bae421bd5564d09a4de7be',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
- 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
- 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
- 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'NetworkInterfaceMetadata': '1.2-6f3d480b40fe339067b1c0dd4d656716',
'NetworkMetadata': '1.0-2cb8d21b34f87b0261d3e1d1ae5cf218',
'NetworkRequest': '1.3-3a815ea3df7defa61e0b894dee5288ba',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NicDiagnostics': '1.0-895e9ad50e0f56d5258585e3e066aea5',
- 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
+ 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
+ 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
+ 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
+ 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'PciDevice': '1.7-680e4c590aae154958ccf9677774413b',
+ 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'PowerVMLiveMigrateData': '1.4-a745f4eda16b45e1bc5686a0c498f27e',
'Quotas': '1.3-3b2b91371f60e788035778fc5f87797d',
'QuotasNoOp': '1.3-d1593cf969c81846bc8192255ea95cce',
'RequestGroup': '1.3-0458d350a8ec9d0673f9be5640a990ce',
@@ -1127,9 +1127,9 @@ object_data = {
'ResourceList': '1.0-4a53826625cc280e15fae64a575e0879',
'ResourceMetadata': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
- 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
+ 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SecurityGroup': '1.2-86d67d8d3ab0c971e1dc86e02f9524a8',
'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71',
'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264',
@@ -1142,16 +1142,14 @@ object_data = {
'TrustedCerts': '1.0-dcf528851e0f868c77ee47e90563cda7',
'USBDeviceBus': '1.0-e4c7dd6032e46cd74b027df5eb2d4750',
'VIFMigrateData': '1.0-cb15282b25a039ab35046ed705eb931d',
- 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VirtCPUFeature': '1.0-ea2464bdd09084bd388e5f61d5d4fc86',
'VirtCPUModel': '1.0-5e1864af9227f698326203d7249796b5',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.3-efd3ca8ebcc5ce65fff5a25f31754c54',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
+ 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenDeviceBus': '1.0-272a4f899b24e31e42b2b9a7ed7e9194',
- # TODO(efried): re-alphabetize this
- 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
}
diff --git a/nova/tests/unit/objects/test_pci_device.py b/nova/tests/unit/objects/test_pci_device.py
index 277a7fe7c4..1e971c5a21 100644
--- a/nova/tests/unit/objects/test_pci_device.py
+++ b/nova/tests/unit/objects/test_pci_device.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -161,6 +161,16 @@ class _TestPciDeviceObject(object):
'vendor_id', 'numa_node', 'status', 'uuid',
'extra_info', 'dev_type', 'parent_addr']))
+ def test_pci_device_extra_info_card_serial_number(self):
+ self.dev_dict = copy.copy(dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, self.dev_dict)
+ self.assertIsNone(self.pci_device.card_serial_number)
+
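+ # a device that exposes a VPD capability with a serial number should
+ # report it through the card_serial_number property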
+ self.dev_dict = copy.copy(dev_dict)
+ self.dev_dict['capabilities'] = {'vpd': {'card_serial_number': '42'}}
+ self.pci_device = pci_device.PciDevice.create(None, self.dev_dict)
+ self.assertEqual(self.pci_device.card_serial_number, '42')
+
def test_update_device(self):
self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.pci_device.obj_reset_changes()
@@ -457,6 +467,16 @@ class _TestPciDeviceObject(object):
devobj.claim(self.inst.uuid)
self.assertRaises(exception.PciDeviceInvalidStatus, devobj.remove)
+ def test_remove_device_fail_owned_with_unavailable_state(self):
+ # This test creates a PCI device in an invalid state. This should
+ # not happen in any known scenario, but we want to be safe and not
+ # allow removing a device that has an owner. See bug 1969496 for
+ # more details.
+ self._create_fake_instance()
+ devobj = pci_device.PciDevice.create(None, dev_dict)
+ devobj.claim(self.inst.uuid)
+ devobj.status = fields.PciDeviceStatus.UNAVAILABLE
+ self.assertRaises(exception.PciDeviceInvalidOwner, devobj.remove)
+
class TestPciDeviceObject(test_objects._LocalTest,
_TestPciDeviceObject):
diff --git a/nova/tests/unit/objects/test_quotas.py b/nova/tests/unit/objects/test_quotas.py
index 154c9f278a..15da48f1c4 100644
--- a/nova/tests/unit/objects/test_quotas.py
+++ b/nova/tests/unit/objects/test_quotas.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova.db.main import api as db_api
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 31797f8133..58b9859234 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -116,14 +116,19 @@ class _TestRequestSpecObject(object):
else:
self.assertEqual(instance.get(field), getattr(spec, field))
- @mock.patch.object(objects.InstancePCIRequests,
- 'from_request_spec_instance_props')
- def test_from_instance_with_pci_requests(self, pci_from_spec):
- fake_pci_requests = objects.InstancePCIRequests()
- pci_from_spec.return_value = fake_pci_requests
+ def test_from_instance_with_pci_requests(self):
+ fake_pci_requests = objects.InstancePCIRequests(
+ instance_uuid=uuids.instance,
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ spec=[{'vendor_id': '8086'}],
+ ),
+ ],
+ )
instance = dict(
- uuid=uuidutils.generate_uuid(),
+ uuid=uuids.instance,
root_gb=10,
ephemeral_gb=0,
memory_mb=10,
@@ -132,14 +137,15 @@ class _TestRequestSpecObject(object):
project_id=fakes.FAKE_PROJECT_ID,
user_id=fakes.FAKE_USER_ID,
availability_zone='nova',
- pci_requests={
- 'instance_uuid': 'fakeid',
- 'requests': [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]})
+ pci_requests=fake_pci_requests.obj_to_primitive(),
+ )
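+ # the instance carries pci_requests in o.vo primitive (dict) form;
+ # _from_instance is expected to hydrate it back into objects, which
+ # the spec-level assertion below verifies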
spec = objects.RequestSpec()
spec._from_instance(instance)
- pci_from_spec.assert_called_once_with(instance['pci_requests'])
- self.assertEqual(fake_pci_requests, spec.pci_requests)
+ self.assertEqual(
+ fake_pci_requests.requests[0].spec,
+ spec.pci_requests.requests[0].spec,
+ )
def test_from_instance_with_numa_stuff(self):
instance = dict(
@@ -424,6 +430,62 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
+ def test_from_components_flavor_based_pci_requests(self):
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+ ctxt = context.RequestContext(
+ fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
+ )
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {
+ "id": uuids.image_id,
+ "properties": {"mappings": []},
+ "status": "fake-status",
+ "location": "far-away",
+ }
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {"fake": "property"}
+
+ qos_port_rg = request_spec.RequestGroup()
+ req_lvl_params = request_spec.RequestLevelParams()
+
+ pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "1234", "product_id": "fe12"}],
+ )
+ ]
+ )
+ pci_request_group = request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_1234_FE12": 1},
+ same_provider=True,
+ )
+
+ spec = objects.RequestSpec.from_components(
+ ctxt,
+ instance.uuid,
+ image,
+ flavor,
+ instance.numa_topology,
+ pci_requests,
+ filter_properties,
+ None,
+ instance.availability_zone,
+ port_resource_requests=[qos_port_rg],
+ request_level_params=req_lvl_params,
+ )
+
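+ # with pci_in_placement enabled, the flavor-based PCI request is
+ # translated into a placement request group, appended after the qos
+ # port group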
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(qos_port_rg, spec.requested_resources[0])
+ self.assertEqual(
+ pci_request_group.obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+ self.assertEqual(req_lvl_params, spec.request_level_params)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
@@ -615,6 +677,30 @@ class _TestRequestSpecObject(object):
self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup)
self.assertEqual('fresh', req_obj.instance_group.name)
+ @mock.patch.object(
+ request_spec.RequestSpec, '_get_by_instance_uuid_from_db'
+ )
+ @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
+ def test_get_by_instance_uuid_deleted_group(
+ self, mock_get_ig, get_by_uuid
+ ):
+ fake_spec_obj = fake_request_spec.fake_spec_obj()
+ fake_spec_obj.scheduler_hints['group'] = ['fresh']
+ fake_spec = fake_request_spec.fake_db_spec(fake_spec_obj)
+ get_by_uuid.return_value = fake_spec
+ mock_get_ig.side_effect = exception.InstanceGroupNotFound(
+ group_uuid=uuids.instgroup
+ )
+
+ req_obj = request_spec.RequestSpec.get_by_instance_uuid(
+ self.context, fake_spec['instance_uuid']
+ )
+ # assert that both the instance_group object and scheduler hint
+ # are cleared if the instance_group was deleted since the request
+ # spec was last saved to the db.
+ self.assertIsNone(req_obj.instance_group)
+ self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints)
+
@mock.patch('nova.objects.request_spec.RequestSpec.save')
@mock.patch.object(
request_spec.RequestSpec, '_get_by_instance_uuid_from_db')
@@ -1024,6 +1110,183 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
pass
+class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_pci_reqs_ignored_if_disabled(self):
+ self.flags(group='filter_scheduler', pci_in_placement=False)
+
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_neutron_based_requests_are_ignored(self):
+ pci_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[],
+ )
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(requests=[pci_req]),
+ )
+ self.assertEqual(
+ objects.InstancePCIRequest.NEUTRON_PORT, pci_req.source
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
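+ # requests sourced from neutron ports (spec left empty, source
+ # defaulting to NEUTRON_PORT) are skipped here; their resources are
+ # presumably accounted via the port's own resource request instead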
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_rc_from_product_and_vendor(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[{"vendor_id": "fff", "product_id": "dead"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
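+ # each PCI request becomes its own request group; the resource class
+ # is derived from the vendor and product IDs as
+ # CUSTOM_PCI_<VENDOR>_<PRODUCT>, upper-cased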
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_multi_device_split_to_multiple_groups(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=2,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
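+ # a request with count=2 is split into two single-device groups,
+ # distinguished by the -0 and -1 requester_id suffixes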
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-1",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_with_rc_and_traits_from_the_pci_req_spec(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "de12",
+ "product_id": "1234",
+ "resource_class": "gpu",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[
+ {
+ "vendor_id": "fff",
+ "product_id": "dead",
+ "traits": "foo,bar,CUSTOM_BLUE",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
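+ # an explicit resource_class overrides the vendor/product derived
+ # class, and traits are normalized into the CUSTOM_ namespace
+ # (foo -> CUSTOM_FOO) while already-prefixed names pass through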
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_GPU": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ # Note that sets are serialized to tuples by obj_to_primitive in
+ # random order, so we need to compare the expected group field by
+ # field instead.
+ expected = request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ required_traits={"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"},
+ use_same_provider=True,
+ )
+ actual = spec.requested_resources[1]
+ for field in request_spec.RequestGroup.fields.keys():
+ self.assertEqual(getattr(expected, field), getattr(actual, field))
+
+
class TestRequestGroupObject(test.NoDBTestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
diff --git a/nova/tests/unit/objects/test_resource.py b/nova/tests/unit/objects/test_resource.py
index 3ac12eee84..0e43df185b 100644
--- a/nova/tests/unit/objects/test_resource.py
+++ b/nova/tests/unit/objects/test_resource.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_security_group.py b/nova/tests/unit/objects/test_security_group.py
index 7d6a3773c5..527e5d84d6 100644
--- a/nova/tests/unit/objects/test_security_group.py
+++ b/nova/tests/unit/objects/test_security_group.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import fixture as ovo_fixture
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
index 84cbd4bf6a..60ab806207 100644
--- a/nova/tests/unit/objects/test_service.py
+++ b/nova/tests/unit/objects/test_service.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
diff --git a/nova/tests/unit/objects/test_tag.py b/nova/tests/unit/objects/test_tag.py
index 29579b1e78..caf039152d 100644
--- a/nova/tests/unit/objects/test_tag.py
+++ b/nova/tests/unit/objects/test_tag.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import tag
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_task_log.py b/nova/tests/unit/objects/test_task_log.py
index 6d93ebab4c..2ac7971c28 100644
--- a/nova/tests/unit/objects/test_task_log.py
+++ b/nova/tests/unit/objects/test_task_log.py
@@ -11,9 +11,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_utils import timeutils
from nova import objects
diff --git a/nova/tests/unit/objects/test_trusted_certs.py b/nova/tests/unit/objects/test_trusted_certs.py
index 3010dd6b5c..9029845ef3 100644
--- a/nova/tests/unit/objects/test_trusted_certs.py
+++ b/nova/tests/unit/objects/test_trusted_certs.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import trusted_certs
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
index a9049bac88..a806668c6b 100644
--- a/nova/tests/unit/objects/test_virtual_interface.py
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_volume_usage.py b/nova/tests/unit/objects/test_volume_usage.py
index a465955ad6..d8df53d5c7 100644
--- a/nova/tests/unit/objects/test_volume_usage.py
+++ b/nova/tests/unit/objects/test_volume_usage.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/pci/fakes.py b/nova/tests/unit/pci/fakes.py
index 93ab33b27f..e0267ff087 100644
--- a/nova/tests/unit/pci/fakes.py
+++ b/nova/tests/unit/pci/fakes.py
@@ -14,8 +14,8 @@
# under the License.
import functools
+from unittest import mock
-import mock
from nova.pci import whitelist
diff --git a/nova/tests/unit/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
index 69d774cd86..4f747e7b7d 100644
--- a/nova/tests/unit/pci/test_devspec.py
+++ b/nova/tests/unit/pci/test_devspec.py
@@ -11,12 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-import mock
+from unittest import mock
from nova import exception
from nova import objects
from nova.pci import devspec
+from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova import test
dev = {"vendor_id": "8086",
@@ -51,7 +51,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
for component in invalid_val_addr:
address = dict(self.pci_addr)
address[component] = str(invalid_val_addr[component])
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_dict_missing_values(self):
@@ -75,7 +75,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_string_missing_values(self):
@@ -121,7 +121,7 @@ class PciAddressGlobSpecTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciAddressGlobSpec, address)
def test_match(self):
@@ -207,18 +207,18 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_address_invalid_character(self):
pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ("Invalid PCI devices Whitelist config: property func ('12:6') "
+ msg = ("Invalid [pci]device_spec config: property func ('12:6') "
"does not parse as a hex number.")
self.assertEqual(msg, str(exc))
def test_max_func(self):
pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property func (%x) is '
+ msg = ('Invalid [pci]device_spec config: property func (%x) is '
'greater than the maximum allowable value (%x).'
% (devspec.MAX_FUNC + 1, devspec.MAX_FUNC))
self.assertEqual(msg, str(exc))
@@ -226,9 +226,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_domain(self):
pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property domain (%X) '
+ msg = ('Invalid [pci]device_spec config: property domain (%X) '
'is greater than the maximum allowable value (%X).'
% (devspec.MAX_DOMAIN + 1, devspec.MAX_DOMAIN))
self.assertEqual(msg, str(exc))
@@ -236,9 +236,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_bus(self):
pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property bus (%X) is '
+ msg = ('Invalid [pci]device_spec config: property bus (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_BUS + 1, devspec.MAX_BUS))
self.assertEqual(msg, str(exc))
@@ -246,9 +246,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_slot(self):
pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property slot (%X) is '
+ msg = ('Invalid [pci]device_spec config: property slot (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_SLOT + 1, devspec.MAX_SLOT))
self.assertEqual(msg, str(exc))
@@ -382,10 +382,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_vendor_id_out_of_range(self):
pci_info = {"vendor_id": "80860", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property vendor_id (80860) "
+ "Invalid [pci]device_spec config: property vendor_id (80860) "
"is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -398,10 +398,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_product_id_out_of_range(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "50570", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property product_id "
+ "Invalid [pci]device_spec config: property product_id "
"(50570) is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -449,3 +449,242 @@ class PciDevSpecTestCase(test.NoDBTestCase):
pci_obj = objects.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
+
+
+class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
+
+ def setUp(self):
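+ # a VF-like device exposing a VPD card serial number, which the
+ # remote-managed matching exercised below relies on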
+ self.test_dev = {
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "address": "0000:0a:00.0",
+ "capabilities": {"vpd": {"card_serial_number": "MT2113X00000"}},
+ }
+ super().setUp()
+
+ @mock.patch('nova.pci.utils.get_function_by_ifname',
+ new=mock.Mock(return_value=(None, False)))
+ def test_remote_managed_unknown_raises(self):
+ pci_info = {"devname": "nonexdev0", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceRemoteManagedNotPresent,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.get_vf_product_id_by_pf_addr',
+ new=mock.Mock(return_value="5058"))
+ @mock.patch('nova.pci.utils.get_pci_ids_by_pci_addr',
+ new=mock.Mock(return_value=("8086", "5057")))
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_remote_managed_pf_raises(self):
+ """Remote-managed PF test case with PF-based VF matching
+
+ 5058 is the expected VF product ID which differs from the
+ one specified in the whitelist. This is to simulate a mistake
+ in the whitelist where a user uses both the PF PCI address and
+ PF product and vendor ID instead of using the VF product ID.
+ """
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceInvalidPFRemoteManaged,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.get_vf_product_id_by_pf_addr',
+ new=mock.Mock(return_value="5058"))
+ @mock.patch('nova.pci.utils.get_pci_ids_by_pci_addr',
+ new=mock.Mock(return_value=("8086", "5057")))
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_remote_managed_vf_by_pf(self):
+ """Remote-managed PF test case with PF-based VF matching
+
+ This is to test the supported matching of a VF by using
+ its product and vendor ID and a specific PF PCI address.
+ """
+ # Full match: 5058 is the expected VF product ID which
+ # matches the one specified in the whitelist. This is to
+ # simulate the supported matching of a VF by using its
+ # product and vendor ID and a specific PF PCI address.
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5058", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ devspec.PciDeviceSpec(pci_info)
+
+ # This spec would match both PFs and VFs. Since we must ensure that
+ # remote-managed PFs are not allowed, we have to prohibit this
+ # altogether.
+ pci_info = {"vendor_id": "*", "address": "0000:0a:00.0",
+ "product_id": "*", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceInvalidPFRemoteManaged,
+ devspec.PciDeviceSpec, pci_info)
+
+ # Don't care about a VF product ID. Like above, this would
+ # match both PFs and VFs (since VFs have the same vendor ID).
+ # Therefore, this case is prohibited to avoid remote-managed PFs.
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "*", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceInvalidPFRemoteManaged,
+ devspec.PciDeviceSpec, pci_info)
+
+ # Don't care about a VF vendor ID.
+ pci_info = {"vendor_id": "*", "address": "0000:0a:00.0",
+ "product_id": "5058", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ devspec.PciDeviceSpec(pci_info)
+
+ @mock.patch('nova.pci.utils.get_vf_product_id_by_pf_addr',
+ new=mock.Mock(return_value="5058"))
+ @mock.patch('nova.pci.utils.get_pci_ids_by_pci_addr',
+ new=mock.Mock(return_value=("8086", "5057")))
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_remote_managed_vf_by_pf_raises(self):
+ """Remote-managed PF test case with PF-based VF matching
+
+ 5058 is the expected VF product ID which matches the one
+ specified in the whitelist. This is to simulate the supported
+ matching of a VF by using its product and vendor ID and a
+ specific PF PCI address.
+ """
+ # VF vendor ID and device ID mismatch.
+ pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
+ "product_id": "5050", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciConfigInvalidSpec,
+ devspec.PciDeviceSpec, pci_info)
+
+ # VF device ID mismatch.
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5050", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciConfigInvalidSpec,
+ devspec.PciDeviceSpec, pci_info)
+
+ # VF vendor ID mismatch.
+ pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
+ "product_id": "5058", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciConfigInvalidSpec,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_not_remote_managed_pf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "false"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_no_remote_managed_specified_pf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_remote_managed_specified_vf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_remote_managed_specified_no_serial_vf_no_match(self):
+ # No card serial number available - must not get a match.
+ test_dev = {
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "address": "0000:0a:00.0",
+ }
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_remote_managed_specified_empty_serial_vf_no_match(self):
+ # Card serial is an empty string.
+ test_dev = {
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "address": "0000:0a:00.0",
+ "capabilities": {"vpd": {"card_serial_number": ""}},
+ }
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_not_remote_managed_vf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "false"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_no_remote_managed_specified_vf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ def test_remote_managed_vf_match_by_pci_obj(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.2",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+
+ pci = devspec.PciDeviceSpec(pci_info)
+ pci_dev = {
+ "compute_node_id": 1,
+ "address": "0000:0a:00.2",
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "capabilities": {"vpd": {"card_serial_number": "MT2113X00000"}},
+ "status": "available",
+ "parent_addr": "0000:0a:00.1",
+ }
+
+ pci_obj = objects.PciDevice.create(None, pci_dev)
+ self.assertTrue(pci.match_pci_obj(pci_obj))
+
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ def test_remote_managed_vf_no_match_by_pci_obj(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+
+ pci = devspec.PciDeviceSpec(pci_info)
+ pci_dev = {
+ "compute_node_id": 1,
+ "address": "0000:0a:00.2",
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "status": "available",
+ "parent_addr": "0000:0a:00.1",
+ }
+
+ pci_obj = objects.PciDevice.create(None, pci_dev)
+ self.assertFalse(pci.match_pci_obj(pci_obj))
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
index 39d0b116bb..bcd4cecb85 100644
--- a/nova/tests/unit/pci/test_manager.py
+++ b/nova/tests/unit/pci/test_manager.py
@@ -14,13 +14,14 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from nova.compute import vm_states
from nova import context
+from nova import exception
from nova import objects
from nova.objects import fields
from nova.pci import manager
@@ -42,6 +43,8 @@ fake_pci_1 = dict(fake_pci, address='0000:00:00.2',
product_id='p1', vendor_id='v1')
fake_pci_2 = dict(fake_pci, address='0000:00:00.3')
+fake_pci_devs = [fake_pci, fake_pci_1, fake_pci_2]
+
fake_pci_3 = dict(fake_pci, address='0000:00:01.1',
dev_type=fields.PciDeviceType.SRIOV_PF,
vendor_id='v2', product_id='p2', numa_node=None)
@@ -53,6 +56,7 @@ fake_pci_5 = dict(fake_pci, address='0000:00:02.2',
dev_type=fields.PciDeviceType.SRIOV_VF,
parent_addr='0000:00:01.1',
vendor_id='v2', product_id='p2', numa_node=None)
+fake_pci_devs_tree = [fake_pci_3, fake_pci_4, fake_pci_5]
fake_db_dev = {
'created_at': None,
@@ -142,14 +146,14 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
requests=pci_reqs)
def _create_tracker(self, fake_devs):
- self.fake_devs = fake_devs
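+ # deep copy so tests that mutate device dicts do not leak state into
+ # the shared module-level fixtures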
+ self.fake_devs = copy.deepcopy(fake_devs)
self.tracker = manager.PciDevTracker(
self.fake_context, objects.ComputeNode(id=1, numa_topology=None))
def setUp(self):
super(PciDevTrackerTestCase, self).setUp()
self.fake_context = context.get_admin_context()
- self.fake_devs = fake_db_devs[:]
+ self.fake_devs = copy.deepcopy(fake_db_devs)
self.stub_out('nova.db.main.api.pci_device_get_all_by_node',
self._fake_get_pci_devices)
# The fake_pci_whitelist must be called before creating the fake
@@ -157,7 +161,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_instance()
- self._create_tracker(fake_db_devs[:])
+ self._create_tracker(fake_db_devs)
def test_pcidev_tracker_create(self):
self.assertEqual(len(self.tracker.pci_devs), 3)
@@ -231,7 +235,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self, mock_debug):
self.flags(
group='pci',
- passthrough_whitelist=[
+ device_spec=[
'{"product_id":"2032", "vendor_id":"8086"}'])
# There are systems where 32 bit PCI domain is used. See bug 1897528
# for example. While nova (and qemu) does not support assigning such
@@ -266,9 +270,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
def test_set_hvdev_new_dev(self):
fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
- copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_1, fake_pci_2, fake_pci_3]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(len(self.tracker.pci_devs), 4)
self.assertEqual(set([dev.address for
dev in self.tracker.pci_devs]),
@@ -284,11 +287,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self._create_tracker(fake_db_devs_tree)
fake_new_device = dict(fake_pci_5, id=12, address='0000:00:02.3')
- fake_pci_devs = [copy.deepcopy(fake_pci_3),
- copy.deepcopy(fake_pci_4),
- copy.deepcopy(fake_pci_5),
- copy.deepcopy(fake_new_device)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci_3, fake_pci_4, fake_pci_5, fake_new_device]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(len(self.tracker.pci_devs), 4)
pf = [dev for dev in self.tracker.pci_devs
@@ -304,15 +304,14 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
def test_set_hvdev_changed(self):
fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_v2)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_2, fake_pci_v2]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(set([dev.vendor_id for
dev in self.tracker.pci_devs]),
set(['v', 'v1']))
def test_set_hvdev_remove(self):
- self.tracker._set_hvdevs([fake_pci])
+ self.tracker._set_hvdevs(copy.deepcopy([fake_pci]))
self.assertEqual(
len([dev for dev in self.tracker.pci_devs
if dev.status == fields.PciDeviceStatus.REMOVED]),
@@ -324,8 +323,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
# from previous scans)
self._create_tracker(fake_db_devs_tree)
- fake_pci_devs = [copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_4)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci_3, fake_pci_4]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(
2,
len([dev for dev in self.tracker.pci_devs
@@ -344,8 +343,9 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
# Make sure the device tree is properly maintained when there are
# devices removed from the system that are allocated to vms.
- all_devs = fake_db_devs_tree[:]
- self._create_tracker(all_devs)
+ all_db_devs = fake_db_devs_tree
+ all_pci_devs = fake_pci_devs_tree
+ self._create_tracker(all_db_devs)
# we start with 3 devices
self.assertEqual(
3,
@@ -358,7 +358,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
claimed_dev = self.tracker.claim_instance(
mock.sentinel.context, pci_requests_obj, None)[0]
- self.tracker._set_hvdevs(all_devs)
+ self.tracker._set_hvdevs(copy.deepcopy(all_pci_devs))
# and assert that no devices were removed
self.assertEqual(
0,
@@ -366,10 +366,10 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
if dev.status == fields.PciDeviceStatus.REMOVED]))
# we then try to remove the allocated device from the set reported
# by the driver.
- fake_pci_devs = [dev for dev in all_devs
+ fake_pci_devs = [dev for dev in all_pci_devs
if dev['address'] != claimed_dev.address]
with mock.patch("nova.pci.manager.LOG.warning") as log:
- self.tracker._set_hvdevs(fake_pci_devs)
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
log.assert_called_once()
args = log.call_args_list[0][0] # args of first call
self.assertIn('Unable to remove device with', args[0])
@@ -380,7 +380,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
if dev.status == fields.PciDeviceStatus.REMOVED]))
# free the device that was allocated and update tracker again
self.tracker._free_device(claimed_dev)
- self.tracker._set_hvdevs(fake_pci_devs)
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
# and assert that one device is removed from the tracker
self.assertEqual(
1,
@@ -393,12 +393,249 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.tracker.claim_instance(mock.sentinel.context,
pci_requests_obj, None)
fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_3)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_2, fake_pci_3]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(len(self.tracker.stale), 1)
self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
+ def _get_device_by_address(self, address):
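+ # helper: return the single tracked device at the given PCI address,
+ # raising on duplicates or misses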
+ devs = [dev for dev in self.tracker.pci_devs if dev.address == address]
+ if len(devs) == 1:
+ return devs[0]
+ if devs:
+ raise ValueError('ambiguous address', devs)
+ else:
+ raise ValueError('device not found', address)
+
+ def test_set_hvdevs_unavailable_vf_removed(self):
+ # We start with a PF parent and two VF children
+ self._create_tracker([fake_db_dev_3, fake_db_dev_4, fake_db_dev_5])
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # then claim and allocate the PF that makes the VFs unavailable
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('allocated', dev3_pf.status)
+ self.assertEqual(uuidsentinel.instance1, dev3_pf.instance_uuid)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('unavailable', dev4_vf.status)
+ dev5_vf = self._get_device_by_address(fake_db_dev_5['address'])
+ self.assertEqual('unavailable', dev5_vf.status)
+
+ # now simulate that one VF (dev_5) is removed from the hypervisor and
+ # the compute is restarted. As the VF is not claimed or allocated we
+ # are free to remove it from the tracker.
+ self.tracker._set_hvdevs(copy.deepcopy([fake_pci_3, fake_pci_4]))
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('allocated', dev3_pf.status)
+ self.assertEqual(uuidsentinel.instance1, dev3_pf.instance_uuid)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('unavailable', dev4_vf.status)
+ dev5_vf = self._get_device_by_address(fake_db_dev_5['address'])
+ self.assertEqual('removed', dev5_vf.status)
+
+ def test_set_hvdevs_unavailable_pf_removed(self):
+ # We start with one PF parent and one child VF
+ self._create_tracker([fake_db_dev_3, fake_db_dev_4])
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_VF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # Then we claim and allocate the VF that makes the PF unavailable
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('unavailable', dev3_pf.status)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('allocated', dev4_vf.status)
+ self.assertEqual(uuidsentinel.instance1, dev4_vf.instance_uuid)
+
+ # now simulate that the parent PF is removed from the hypervisor and
+ # the compute is restarted. As the PF is not claimed or allocated we
+ # are free to remove it from the tracker.
+ self.tracker._set_hvdevs(copy.deepcopy([fake_pci_4]))
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('removed', dev3_pf.status)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('allocated', dev4_vf.status)
+ self.assertEqual(uuidsentinel.instance1, dev4_vf.instance_uuid)
+
+ def test_claim_available_pf_while_child_vf_is_unavailable(self):
+ # NOTE(gibi): this is bug 1969496. The state created here is
+ # inconsistent and should not happen, but it did happen in some
+ # cases where we were not able to track down how it happened.
+
+ # We start with a PF parent and a VF child. The PF is available and
+ # the VF is unavailable.
+ pf = copy.deepcopy(fake_db_dev_3)
+ vf = copy.deepcopy(fake_db_dev_4)
+ vf['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ self._create_tracker([pf, vf])
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf_dev = self._get_device_by_address(vf['address'])
+ self.assertEqual('unavailable', vf_dev.status)
+
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # now try to claim and allocate the PF. It should work as it is
+ # available
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('allocated', pf_dev.status)
+ vf_dev = self._get_device_by_address(vf['address'])
+ self.assertEqual('unavailable', vf_dev.status)
+
+ self.assertIn(
+ 'Some child device of parent 0000:00:01.1 is in an inconsistent '
+ 'state. If you can reproduce this warning then please report a '
+ 'bug at https://bugs.launchpad.net/nova/+filebug with '
+ 'reproduction steps. Inconsistent children with state: '
+ '0000:00:02.1 - unavailable',
+ self.stdlog.logger.output
+ )
+
+ # Ensure that the claim actually fixes the inconsistency so that when
+ # the parent is freed the children become available too.
+ self.tracker.free_instance(
+ mock.sentinel.context, {'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf_dev = self._get_device_by_address(vf['address'])
+ self.assertEqual('available', vf_dev.status)
+
+ def test_claim_available_pf_while_children_vfs_are_in_mixed_state(self):
+ # We start with a PF parent and two VF children. The PF is available
+ # and one of the VF is unavailable while the other is available.
+ pf = copy.deepcopy(fake_db_dev_3)
+ vf1 = copy.deepcopy(fake_db_dev_4)
+ vf1['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ vf2 = copy.deepcopy(fake_db_dev_5)
+ vf2['status'] = fields.PciDeviceStatus.AVAILABLE
+ self._create_tracker([pf, vf1, vf2])
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('available', vf2_dev.status)
+
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # now try to claim and allocate the PF. It should work as it is
+ # available
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('allocated', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('unavailable', vf2_dev.status)
+
+ self.assertIn(
+ 'Some child device of parent 0000:00:01.1 is in an inconsistent '
+ 'state. If you can reproduce this warning then please report a '
+ 'bug at https://bugs.launchpad.net/nova/+filebug with '
+ 'reproduction steps. Inconsistent children with state: '
+ '0000:00:02.1 - unavailable',
+ self.stdlog.logger.output
+ )
+
+ # Ensure that the claim actually fixes the inconsistency so that when
+ # the parent is freed the children become available too.
+ self.tracker.free_instance(
+ mock.sentinel.context, {'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('available', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('available', vf2_dev.status)
+
+ def test_claim_available_pf_while_a_child_is_used(self):
+ pf = copy.deepcopy(fake_db_dev_3)
+ vf1 = copy.deepcopy(fake_db_dev_4)
+ vf1['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ vf2 = copy.deepcopy(fake_db_dev_5)
+ vf2['status'] = fields.PciDeviceStatus.CLAIMED
+ self._create_tracker([pf, vf1, vf2])
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('claimed', vf2_dev.status)
+
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # now try to claim and allocate the PF. The claim should fail as one
+ # of the children is in use.
+ self.assertRaises(
+ exception.PciDeviceVFInvalidStatus,
+ self.tracker.claim_instance,
+ mock.sentinel.context,
+ pci_requests_obj,
+ None,
+ )
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('claimed', vf2_dev.status)
+
def test_update_pci_for_instance_active(self):
pci_requests_obj = self._create_pci_requests_object(fake_pci_requests)
self.tracker.claim_instance(mock.sentinel.context,
@@ -414,8 +651,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
pci_requests = copy.deepcopy(fake_pci_requests)
pci_requests[0]['count'] = 4
pci_requests_obj = self._create_pci_requests_object(pci_requests)
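+ # claim_instance now raises when the request cannot be satisfied
+ # instead of returning an empty claim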
- self.tracker.claim_instance(mock.sentinel.context,
- pci_requests_obj, None)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
+ mock.sentinel.context,
+ pci_requests_obj,
+ None
+ )
self.assertEqual(len(self.tracker.claims[self.inst['uuid']]), 0)
devs = self.tracker.update_pci_for_instance(None,
self.inst,
@@ -424,13 +666,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.assertIsNone(devs)
def test_pci_claim_instance_with_numa(self):
- fake_db_dev_3 = dict(fake_db_dev_1, id=4, address='0000:00:00.4')
- fake_devs_numa = copy.deepcopy(fake_db_devs)
- fake_devs_numa.append(fake_db_dev_3)
+ fake_pci_3 = dict(fake_pci_1, address='0000:00:00.4')
+ fake_devs_numa = copy.deepcopy(fake_pci_devs)
+ fake_devs_numa.append(fake_pci_3)
self.tracker = manager.PciDevTracker(
mock.sentinel.context,
objects.ComputeNode(id=1, numa_topology=None))
- self.tracker._set_hvdevs(fake_devs_numa)
+ self.tracker._set_hvdevs(copy.deepcopy(fake_devs_numa))
pci_requests = copy.deepcopy(fake_pci_requests)[:1]
pci_requests[0]['count'] = 2
pci_requests_obj = self._create_pci_requests_object(pci_requests)
@@ -450,11 +692,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.inst.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
- claims = self.tracker.claim_instance(
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
mock.sentinel.context,
pci_requests_obj,
- self.inst.numa_topology)
- self.assertEqual([], claims)
+ self.inst.numa_topology
+ )
def test_update_pci_for_instance_deleted(self):
pci_requests_obj = self._create_pci_requests_object(fake_pci_requests)
@@ -477,9 +721,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
'nova.db.main.api.pci_device_update',
self._fake_pci_device_update)
fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_v3)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_2, fake_pci_v3]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.update_called = 0
self.tracker.save(self.fake_context)
self.assertEqual(self.update_called, 3)
@@ -503,7 +746,10 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
def test_clean_usage(self):
inst_2 = copy.copy(self.inst)
inst_2.uuid = uuidsentinel.instance2
- migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
+ migr = objects.Migration(
+ instance_uuid='uuid2',
+ vm_state=vm_states.BUILDING,
+ )
pci_requests_obj = self._create_pci_requests_object(
[{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
@@ -564,7 +810,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
free_pci_device_ids = (
[dev.id for dev in self.tracker.pci_stats.get_free_devs()])
self.assertEqual(2, len(free_pci_device_ids))
- allocated_devs = manager.get_instance_pci_devs(self.inst)
+ allocated_devs = self.inst.get_pci_devices()
pci_device = allocated_devs[0]
self.assertNotIn(pci_device.id, free_pci_device_ids)
instance_uuid = self.inst['uuid']
@@ -627,24 +873,3 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.assertIsNone(self.tracker.allocations.get(instance_uuid))
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(fake_db_devs), len(free_devs))
-
-
-class PciGetInstanceDevs(test.NoDBTestCase):
-
- def test_get_devs_object(self):
- def _fake_obj_load_attr(foo, attrname):
- if attrname == 'pci_devices':
- self.load_attr_called = True
- foo.pci_devices = objects.PciDeviceList()
-
- self.stub_out(
- 'nova.objects.Instance.obj_load_attr',
- _fake_obj_load_attr)
-
- self.load_attr_called = False
- manager.get_instance_pci_devs(objects.Instance())
- self.assertTrue(self.load_attr_called)
-
- def test_get_devs_no_pci_devices(self):
- inst = objects.Instance(pci_devices=None)
- self.assertEqual([], manager.get_instance_pci_devs(inst))
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 3c2ba5b61f..4a3f17f6cb 100644
--- a/nova/tests/unit/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
@@ -15,7 +15,8 @@
"""Tests for PCI request."""
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -186,6 +187,21 @@ class PciRequestTestCase(test.NoDBTestCase):
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
+ def test_get_alias_from_config_valid_rc_and_traits(self):
+ fake_alias = jsonutils.dumps({
+ "name": "xxx",
+ "resource_class": "foo",
+ "traits": "bar,baz",
+ })
+ self.flags(alias=[fake_alias], group='pci')
+ aliases = request._get_alias_from_config()
+ self.assertIsNotNone(aliases)
+ self.assertIn("xxx", aliases)
+ self.assertEqual(
+ ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]),
+ aliases["xxx"],
+ )
+
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
@@ -255,7 +271,7 @@ class PciRequestTestCase(test.NoDBTestCase):
requests = request._translate_alias_to_requests(
"QuickAssist : 3, IntelNIC: 1")
- self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
+ self.assertEqual(set([p.count for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
def test_translate_alias_to_requests_invalid(self):
@@ -292,7 +308,7 @@ class PciRequestTestCase(test.NoDBTestCase):
requests = request._translate_alias_to_requests(
"QuickAssist : 3, IntelNIC: 1", affinity_policy=policy)
- self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
+ self.assertEqual(set([p.count for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
@mock.patch.object(objects.compute_node.ComputeNode,
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index 394a07f9eb..7eb43a05f4 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -12,13 +12,17 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import collections
+from unittest import mock
-import mock
from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
from nova.objects import fields
+from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import stats
from nova.pci import whitelist
from nova import test
@@ -97,33 +101,26 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
- self._setup_pci_stats()
-
- def _setup_pci_stats(self, numa_topology=None):
- """Exists for tests that need to setup pci_stats with a specific NUMA
- topology, while still allowing tests that don't care to get the default
- "empty" one.
- """
- if not numa_topology:
- numa_topology = objects.NUMATopology()
- self.pci_stats = stats.PciDeviceStats(numa_topology)
+ self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
# The following two calls need to be made before adding the devices.
patcher = fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_devs()
def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
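+        # With the finer-grained pooling (e.g. per-parent VF pools), the
+        # four devices now land in four single-device pools.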
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_remove_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[1]['count'], 1)
+ self.assertEqual(self.pci_stats.pools[1]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
@@ -152,36 +149,36 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
m = self.pci_stats.to_device_pools_obj()
new_stats = stats.PciDeviceStats(objects.NUMATopology(), m)
- self.assertEqual(len(new_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
+ self.assertEqual(len(new_stats.pools), 4)
+ self.assertEqual([d['count'] for d in new_stats], [1, 1, 1, 1])
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_apply_requests(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.pci_stats.apply_requests(pci_requests, {})
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
- pci_requests_multiple)
+ pci_requests_multiple,
+ {},
+ )
def test_support_requests(self):
- self.assertTrue(self.pci_stats.support_requests(pci_requests))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
+ self.assertTrue(self.pci_stats.support_requests(pci_requests, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_failed(self):
self.assertFalse(
- self.pci_stats.support_requests(pci_requests_multiple))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.pci_stats.support_requests(pci_requests_multiple, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_numa(self):
cells = [
@@ -190,14 +187,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_failed(self):
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info(self):
cells = [
@@ -205,12 +206,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'])
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
# 'legacy' is the default numa_policy so the result must be same
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy = fields.PCINUMAAffinityPolicy.LEGACY)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_pci_numa_policy_preferred(self):
# numa node 0 has 2 devices with vendor_id 'v1'
@@ -224,7 +229,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(
numa_policy=fields.PCINUMAAffinityPolicy.PREFERRED)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info_pci_numa_policy_required(self):
# pci device with vendor_id 'v3' has numa_node=None.
@@ -236,21 +243,23 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy=fields.PCINUMAAffinityPolicy.REQUIRED)
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_filter_pools_for_socket_affinity_no_socket(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(socket=None)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(socket=None)])
+
self.assertEqual(
[],
self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell()]))
def test_filter_pools_for_socket_affinity(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(id=1, socket=1)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(id=1, socket=1)])
+
pools = self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell(id=1)])
self.assertEqual(1, len(pools))
@@ -268,8 +277,11 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.assertEqual(0, len(devs))
def test_consume_requests_failed(self):
- self.assertIsNone(self.pci_stats.consume_requests(
- pci_requests_multiple))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple,
+ )
def test_consume_requests_numa(self):
cells = [
@@ -288,7 +300,12 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests, cells))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
def test_consume_requests_no_numa_info(self):
cells = [
@@ -320,11 +337,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=[vendor_id],
numa_policy=policy, count=count)
- devs = self.pci_stats.consume_requests(pci_requests, cells)
if expected is None:
- self.assertIsNone(devs)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
else:
+ devs = self.pci_stats.consume_requests(pci_requests, cells)
self.assertEqual(set(expected),
set([dev.product_id for dev in devs]))
@@ -451,9 +473,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
@mock.patch(
'nova.pci.whitelist.Whitelist._parse_white_list_from_config')
- def test_white_list_parsing(self, mock_whitelist_parse):
- white_list = '{"product_id":"0001", "vendor_id":"8086"}'
- CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ def test_device_spec_parsing(self, mock_whitelist_parse):
+ device_spec = {"product_id": "0001", "vendor_id": "8086"}
+ CONF.set_override('device_spec', jsonutils.dumps(device_spec), 'pci')
pci_stats = stats.PciDeviceStats(objects.NUMATopology())
pci_stats.add_device(self.fake_dev_2)
pci_stats.remove_device(self.fake_dev_2)
@@ -464,11 +486,34 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
- white_list = ['{"vendor_id":"1137","product_id":"0071",'
- '"address":"*:0a:00.*","physical_network":"physnet1"}',
- '{"vendor_id":"1137","product_id":"0072"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
- dev_filter = whitelist.Whitelist(white_list)
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "vendor_id": "1137",
+ "product_id": "0071",
+ "address": "*:0a:00.*",
+ "physical_network": "physnet1",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "1137", "product_id": "0072"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "15b3", "product_id": "101c"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "1018",
+ "remote_managed": "false",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
self.pci_stats = stats.PciDeviceStats(
objects.NUMATopology(),
dev_filter=dev_filter)
@@ -502,12 +547,64 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_untagged_devices.append(objects.PciDevice.create(None,
pci_dev))
+ self.locally_managed_netdevs = []
+ self.remote_managed_netdevs = []
+ self.remote_managed_netdevs.append(
+ objects.PciDevice.create(
+ None, {
+ 'compute_node_id': 1,
+ 'address': '0000:0c:00.1',
+ 'vendor_id': '15b3',
+ 'product_id': '101e',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:0c:00.0',
+ 'numa_node': 0,
+ "capabilities": {"vpd": {
+ "card_serial_number": "MT2113X00000"}}
+ }))
+
+ # For testing implicit remote_managed == False tagging.
+ self.locally_managed_netdevs.append(
+ objects.PciDevice.create(
+ None, {
+ 'compute_node_id': 1,
+ 'address': '0000:0d:00.1',
+ 'vendor_id': '15b3',
+ 'product_id': '101c',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:0d:00.0',
+ 'numa_node': 0}))
+
+ # For testing explicit remote_managed == False tagging.
+ self.locally_managed_netdevs.append(
+ objects.PciDevice.create(
+ None, {
+ 'compute_node_id': 1,
+ 'address': '0000:0e:00.1',
+ 'vendor_id': '15b3',
+ 'product_id': '101c',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:0e:00.0',
+ 'numa_node': 0}))
+
for dev in self.pci_tagged_devices:
self.pci_stats.add_device(dev)
for dev in self.pci_untagged_devices:
self.pci_stats.add_device(dev)
+ for dev in self.remote_managed_netdevs:
+ self.pci_stats.add_device(dev)
+
+ for dev in self.locally_managed_netdevs:
+ self.pci_stats.add_device(dev)
+
def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
self.assertEqual(vendor_id, pool['vendor_id'])
self.assertEqual(product_id, pool['product_id'])
@@ -517,21 +614,68 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.assertEqual(v, pool[k])
def _assertPools(self):
+ nr_tagged = len(self.pci_tagged_devices)
+ nr_untagged = len(self.pci_untagged_devices)
+ nr_remote = len(self.remote_managed_netdevs)
+ nr_local = len(self.locally_managed_netdevs)
+ self.assertEqual(
+ nr_tagged + nr_untagged + nr_remote + nr_local,
+ len(self.pci_stats.pools),
+ )
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # two pools with the second one having the tag 'physical_network'
- # and the value 'physnet1'
- self.assertEqual(2, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
+ # they are also part of the keys.
+
+ # 3 pools for the pci_untagged_devices
+ devs = []
+ j = 0
+ for i in range(j, j + nr_untagged):
+ self._assertPoolContent(self.pci_stats.pools[i], '1137', '0072', 1)
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_untagged_devices, devs)
+ j += nr_untagged
+
+        # 4 pools for the pci_tagged_devices
+ devs = []
+ for i in range(j, j + nr_tagged):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "1137",
+ "0071",
+ 1,
+ physical_network="physnet1",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_tagged_devices, devs)
+ j += nr_tagged
+
+        # one pool for the remote_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_remote):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101e",
+ 1,
+ remote_managed="true",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.remote_managed_netdevs, devs)
+ j += nr_remote
+
+        # two pools for the locally_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_local):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101c",
+ 1,
+ remote_managed="false",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.locally_managed_netdevs, devs)
+ j += nr_local
def test_add_devices(self):
self._create_pci_devices()
@@ -543,15 +687,43 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
spec=[{'physical_network': 'physnet1'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '1137',
- 'product_id': '0072'}])]
+ 'product_id': '0072'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '15b3',
+ 'product_id': '101e',
+ PCI_REMOTE_MANAGED_TAG: 'True'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '15b3',
+ 'product_id': '101c',
+ PCI_REMOTE_MANAGED_TAG: 'False'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '15b3',
+ 'product_id': '101c',
+ PCI_REMOTE_MANAGED_TAG: 'False'}])]
devs = self.pci_stats.consume_requests(pci_requests)
- self.assertEqual(2, len(devs))
- self.assertEqual(set(['0071', '0072']),
+ self.assertEqual(5, len(devs))
+ self.assertEqual(set(['0071', '0072', '101e', '101c']),
set([dev.product_id for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 0)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0072', 1)
+ self._assertPoolContent(self.pci_stats.pools[2], '1137', '0072', 1)
+
+ self._assertPoolContent(self.pci_stats.pools[3], '1137', '0071', 0,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[4], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[6], '1137', '0071', 1,
physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[7], '15b3', '101e', 0,
+ remote_managed='true')
+ self._assertPoolContent(self.pci_stats.pools[8], '15b3', '101c', 0,
+ remote_managed='false')
+ self._assertPoolContent(self.pci_stats.pools[9], '15b3', '101c', 0,
+ remote_managed='false')
+
def test_add_device_no_devspec(self):
self._create_pci_devices()
pci_dev = {'compute_node_id': 1,
@@ -592,38 +764,779 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device(dev2)
self._assertPools()
- def test_update_device(self):
- # Update device type of one of the device from type-PCI to
+ def test_update_device_splits_the_pool(self):
+        # Update the device type of one of the devices from type-VF to
# type-PF. Verify if the existing pool is updated and a new
# pool is created with dev_type type-PF.
- self._create_pci_devices()
- dev1 = self.pci_tagged_devices.pop()
- dev1.dev_type = 'type-PF'
- self.pci_stats.update_device(dev1)
- self.assertEqual(3, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[2], '1137', '0071',
- 1,
- physical_network='physnet1')
- self.assertEqual(dev1,
- self.pci_stats.pools[2]['devices'][0])
+ vfs = []
+ for i in range(3):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="1137",
+ product_id="0071",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(3, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+
+ dev = vfs.pop()
+ dev.dev_type = 'type-PF'
+ dev.parent_addr = None
+ self.pci_stats.update_device(dev)
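+        # dev_type is part of the pool key, so the updated PF moves into a
+        # new pool while the remaining VFs stay in the original one.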
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(1, self.pci_stats.pools[1]["count"])
+ self.assertEqual([dev], self.pci_stats.pools[1]["devices"])
+
+ def test_only_vfs_from_the_same_parent_are_pooled(self):
+ pf1_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ pf1_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ pf2_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0b:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0b:01.0",
+ numa_node=0
+ )
+ pf2_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
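+        # VFs are pooled per parent PF, so devices with identical vendor
+        # and product IDs still end up in one pool per parent.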
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(pf1_vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[1]["count"])
+ self.assertEqual(pf2_vfs, self.pci_stats.pools[1]["devices"])
+
+
+class PciDeviceStatsPlacementSupportTestCase(test.NoDBTestCase):
+
+ def test_device_spec_rc_and_traits_ignored_during_pooling(self):
+ """Assert that resource_class and traits from the device spec are not
+ used as discriminator for pool creation.
+ """
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "resource_class": "foo",
+ "address": "*:81:00.1",
+ "traits": "gold",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "resource_class": "baar",
+ "address": "*:81:00.2",
+ "traits": "silver",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+        # the two devices are matched by different device_specs with
+        # different resource_class and traits fields
+ pci_stats.add_device(pci_dev1)
+ pci_stats.add_device(pci_dev2)
+
+ # but they are put in the same pool as all the other fields are
+ # matching
+ self.assertEqual(1, len(pci_stats.pools))
+ self.assertEqual(2, pci_stats.pools[0]["count"])
+
+ def test_filter_pools_for_spec_ignores_rc_and_traits_in_spec(self):
+ """Assert that resource_class and traits are ignored in the pci
+ request spec during matching the request to pools.
+ """
+ pci_stats = stats.PciDeviceStats(objects.NUMATopology())
+ pools = [{"vendor_id": "dead", "product_id": "beef"}]
+
+ matching_pools = pci_stats._filter_pools_for_spec(
+ pools=pools,
+ request=objects.InstancePCIRequest(
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "resource_class": "foo",
+ "traits": "blue",
+ }
+ ]
+ ),
+ )
+
+ self.assertEqual(pools, matching_pools)
+
+ def test_populate_pools_metadata_from_assigned_devices(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp1}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertEqual(uuids.rp1, pci_stats.pools[0]['rp_uuid'])
+
+ def test_populate_pools_metadata_from_assigned_devices_device_without_rp(
+ self
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertNotIn('rp_uuid', pci_stats.pools[0])
+
+ def test_populate_pools_metadata_from_assigned_devices_multiple_rp(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp2}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
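+        # A pool is expected to map to exactly one placement RP, so devices
+        # reporting different rp_uuids in the same pool are rejected.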
+ self.assertRaises(
+ ValueError,
+ pci_stats.populate_pools_metadata_from_assigned_devices,
+ )
+
+
+class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ # for simplicity accept any devices
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "*:*:*.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ self.dev_filter = whitelist.Whitelist(device_spec)
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ # add devices represented by different RPs in placement
+ # two VFs on the same PF
+ self.vf1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.vf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(self.vf1)
+ self.vf1.extra_info = {'rp_uuid': uuids.pf1}
+ self.pci_stats.add_device(self.vf2)
+ self.vf2.extra_info = {'rp_uuid': uuids.pf1}
+        # two PFs, pf2 and pf3 (pf1 is the parent RP of the above VFs)
+ self.pf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:82:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf2)
+ self.pf2.extra_info = {'rp_uuid': uuids.pf2}
+
+ self.pf3 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:83:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf3)
+ self.pf3.extra_info = {'rp_uuid': uuids.pf3}
+        # a plain type-PCI device
+ self.pci1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:84:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PCI",
+ )
+ self.pci_stats.add_device(self.pci1)
+ self.pci1.extra_info = {'rp_uuid': uuids.pci1}
+
+        # populate the RP -> pool mapping from the devices to their pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+        # we have 1 pool for the two VFs, then each remaining device has
+        # its own pool
+ self.num_pools = 4
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 5
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_unrestricted(self):
+ reqs = []
+ for dev_type in ["type-VF", "type-PF", "type-PCI"]:
+ req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": dev_type,
+ }
+ ],
+ )
+ reqs.append(req)
+
+ # an empty mapping means unrestricted by any provider
+        # we have devs for all types so each request should fit
+ self.assertTrue(self.pci_stats.support_requests(reqs, {}))
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the same request to consume the pools
+ self.pci_stats.apply_requests(reqs, {})
+        # we have consumed 3 devs (a VF, a PF, and a PCI)
+ self.assertEqual(
+ self.num_devs - 3,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # the empty pools are purged. We have one pool for the remaining VF
+ # and the remaining PF
+ self.assertEqual(2, len(self.pci_stats.pools))
+
+ def test_support_request_restricted_by_provider_mapping(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+        # simulate that placement restricted the possible RPs to pf3
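+        # (mapping keys follow the f"{request_id}-{index}" format, one
+        # entry per requested device)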
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+ )
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the request and see if the right device is consumed
+ self.pci_stats.apply_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools any more
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_request_restricted_by_provider_mapping_does_not_fit(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect the request to fail
+ self.assertFalse(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and the pools are not changed
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_neutron_port_based_request_ignore_mapping(self):
+ # by not having the alias_name set this becomes a neutron port based
+ # PCI request
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect that the placement selection is ignored for neutron port
+        # based requests, so this request should fit as we have PFs in
+        # the pools.
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.pci_stats.apply_requests(
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and a PF is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+
+ def test_support_request_req_with_count_2(self):
+ # now ask for two PFs in a single request
+ pf_req = objects.InstancePCIRequest(
+ count=2,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both PF reqs
+ mapping = {
+ f"{uuids.req1}-0": [uuids.pf2],
+ f"{uuids.req1}-1": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(self.pci_stats.support_requests([pf_req], mapping))
+ self.pci_stats.apply_requests([pf_req], mapping)
+ # and both PFs are consumed
+ self.assertEqual(self.num_pools - 2, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_requests_multiple_reqs(self):
+ # request both a VF and a PF
+ vf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.pf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned one candidate RP for both reqs
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.pf_req}-0": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req, pf_req], mapping)
+ )
+ self.pci_stats.apply_requests([vf_req, pf_req], mapping)
+ # and the proper devices are consumed
+ # Note that the VF pool still has a device so it remains
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_apply_gets_requested_uuids_from_pci_req(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+                    # Simulate that the scheduler already allocated a
+                    # candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts that we can only consume from
+ # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # call apply with None mapping signalling that the allocation is
+ # already done and the resulted mapping is stored in the request
+ self.pci_stats.apply_requests([pf_req], provider_mapping=None)
+
+ # assert that the right device is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def _create_two_pools_with_two_vfs(self):
+ # create two pools (PFs) with two VFs each
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ for pf_index in [1, 2]:
+ for vf_index in [1, 2]:
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address=f"0000:81:0{pf_index}.{vf_index}",
+ parent_addr=f"0000:81:0{pf_index}.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(dev)
+ dev.extra_info = {'rp_uuid': getattr(uuids, f"pf{pf_index}")}
+
+        # populate the RP -> pool mapping from the devices to their pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+        # we have 2 pools and 4 devs in total
+ self.num_pools = 2
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 4
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_apply_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate where 1 VF
+ # is consumed from PF1 and two from PF2
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.vf_req}-1": [uuids.pf2],
+ f"{uuids.vf_req}-2": [uuids.pf2],
+ }
+ # This should fit
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req], mapping)
+ )
+        # and when applied, the consumption from the pools should be in sync
+ # with the placement allocation. So the PF2 pool is expected to
+ # disappear as it is fully consumed and the PF1 pool should have
+ # one free device.
+ self.pci_stats.apply_requests([vf_req], mapping)
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(uuids.pf1, self.pci_stats.pools[0]['rp_uuid'])
+ self.assertEqual(1, self.pci_stats.pools[0]['count'])
+
+ def test_consume_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+                    # Simulate that the scheduler already allocated a
+                    # candidate
+ # and the mapping is stored in the request.
+ # In placement 1 VF is allocated from PF1 and two from PF2
+ "rp_uuids": ",".join([uuids.pf1, uuids.pf2, uuids.pf2])
+ }
+ ],
+ )
+
+        # So when the PCI claim consumes devices based on this request, we
+        # expect nova to follow what is allocated in placement.
+ devs = self.pci_stats.consume_requests([vf_req])
+ self.assertEqual(
+ {"0000:81:01.0": 1, "0000:81:02.0": 2},
+ collections.Counter(dev.parent_addr for dev in devs),
+ )
+
+ def test_consume_restricted_by_allocation(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+                    # Simulate that the scheduler already allocated a
+                    # candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts that we can only consume from
+ # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+        # Call consume. It always expects the allocated mapping to be stored
+        # in the PCI request, as it is always called from the compute side.
+ consumed_devs = self.pci_stats.consume_requests([pf_req])
+ # assert that the right device is consumed
+ self.assertEqual([self.pf3], consumed_devs)
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {
+ pool["rp_uuid"]
+ for pool in self.pci_stats.pools
+ if pool["count"] > 0
+ },
+ )
class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceVFPFStatsTestCase, self).setUp()
- white_list = ['{"vendor_id":"8086","product_id":"1528"}',
- '{"vendor_id":"8086","product_id":"1515"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
+ device_spec = [
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1528"}),
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1515"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "a2d6",
+ "remote_managed": "false",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group='pci')
self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528):
@@ -644,6 +1557,26 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
dev_obj.child_devices = []
self.sriov_pf_devices.append(dev_obj)
+ # PF devices for remote_managed VFs.
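+        # These fixtures carry a VPD card serial number, which the
+        # remote_managed workflow relies on to identify such devices.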
+ self.sriov_pf_devices_remote = []
+ for dev in range(2):
+ pci_dev = {
+ 'compute_node_id': 1,
+ 'address': '0001:81:00.%d' % dev,
+ 'vendor_id': '15b3',
+ 'product_id': 'a2d6',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_PF,
+ 'parent_addr': None,
+ 'numa_node': 0,
+ "capabilities": {"vpd": {
+ "card_serial_number": "MT2113X00000"}},
+ }
+ dev_obj = objects.PciDevice.create(None, pci_dev)
+ dev_obj.child_devices = []
+ self.sriov_pf_devices_remote.append(dev_obj)
+
self.sriov_vf_devices = []
for dev in range(8):
pci_dev = {
@@ -662,6 +1595,25 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
dev_obj.parent_device.child_devices.append(dev_obj)
self.sriov_vf_devices.append(dev_obj)
+ self.sriov_vf_devices_remote = []
+ for dev in range(8):
+ pci_dev = {
+ 'compute_node_id': 1,
+ 'address': '0001:81:10.%d' % dev,
+ 'vendor_id': '15b3',
+ 'product_id': '101e',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0001:81:00.%d' % int(dev / 4),
+ 'numa_node': 0,
+ "capabilities": {"vpd": {"card_serial_number": "MT2113X00000"}}
+ }
+ dev_obj = objects.PciDevice.create(None, pci_dev)
+ dev_obj.parent_device = self.sriov_pf_devices_remote[int(dev / 4)]
+ dev_obj.parent_device.child_devices.append(dev_obj)
+ self.sriov_vf_devices_remote.append(dev_obj)
+
self.vdpa_devices = []
for dev in range(8):
pci_dev = {
@@ -683,6 +1635,8 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
list(map(self.pci_stats.add_device, self.sriov_pf_devices))
list(map(self.pci_stats.add_device, self.sriov_vf_devices))
list(map(self.pci_stats.add_device, self.vdpa_devices))
+ list(map(self.pci_stats.add_device, self.sriov_pf_devices_remote))
+ list(map(self.pci_stats.add_device, self.sriov_vf_devices_remote))
def test_consume_VDPA_requests(self):
self._create_pci_devices()
@@ -726,7 +1680,8 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
free_devs = self.pci_stats.get_free_devs()
# Validate that there are no free devices left, as when allocating
# both available PFs, its VFs should not be available.
- self.assertEqual(0, len(free_devs))
+ self.assertEqual(0, len([d for d in free_devs
+ if d.product_id == '1515']))
def test_consume_VF_and_PF_requests(self):
self._create_pci_devices()
@@ -747,10 +1702,85 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
objects.InstancePCIRequest(count=1,
spec=[{'product_id': '1528',
'dev_type': 'type-PF'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
- def test_consume_VF_and_PF_same_prodict_id_failed(self):
+ def test_consume_VF_and_PF_same_product_id_failed(self):
self._create_pci_devices(pf_product_id=1515)
pci_requests = [objects.InstancePCIRequest(count=9,
spec=[{'product_id': '1515'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
+
+ def test_consume_PF_not_remote_managed(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=2,
+ spec=[{'product_id': '1528',
+ 'dev_type': 'type-PF',
+ PCI_REMOTE_MANAGED_TAG: 'false'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['1528']),
+ set([dev.product_id for dev in devs]))
+ free_devs = self.pci_stats.get_free_devs()
+        # Validate that there are no free devices left with the
+        # product ID under test: when both available PFs are
+        # allocated, their VFs should not be available.
+ self.assertEqual(0, len([d for d in free_devs
+ if d.product_id == '1528']))
+
+ def test_consume_VF_requests_remote_managed(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=2,
+ spec=[{PCI_REMOTE_MANAGED_TAG: 'true'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['101e']),
+ set([dev.product_id for dev in devs]))
+ free_devs = self.pci_stats.get_free_devs()
+        # Validate that the parents of these VFs have been removed
+        # from the pools.
+ for dev in devs:
+ self.assertNotIn(dev.parent_addr,
+ [free_dev.address for free_dev in free_devs])
+
+ def test_consume_VF_requests_remote_managed_filtered(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '101e',
+ PCI_REMOTE_MANAGED_TAG: 'false'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '101e'}])]
+ free_devs_before = self.pci_stats.get_free_devs()
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
+ free_devs_after = self.pci_stats.get_free_devs()
+ self.assertEqual(free_devs_before, free_devs_after)
+
+ def test_consume_VF_requests_remote_managed_mix(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '101e',
+ PCI_REMOTE_MANAGED_TAG: 'true'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '1515',
+ PCI_REMOTE_MANAGED_TAG: 'false'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['101e', '1515']),
+ set([dev.product_id for dev in devs]))
+ free_devs = self.pci_stats.get_free_devs()
+        # Validate that the parents of these VFs have been removed
+        # from the pools.
+ for dev in devs:
+ self.assertNotIn(dev.parent_addr,
+ [free_dev.address for free_dev in free_devs])
diff --git a/nova/tests/unit/pci/test_utils.py b/nova/tests/unit/pci/test_utils.py
index e444f13729..1a1f9955b9 100644
--- a/nova/tests/unit/pci/test_utils.py
+++ b/nova/tests/unit/pci/test_utils.py
@@ -16,9 +16,9 @@
import glob
import os
+from unittest import mock
import fixtures
-import mock
from nova import exception
from nova.pci import utils
@@ -44,7 +44,7 @@ class PciDeviceMatchTestCase(test.NoDBTestCase):
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
- def test_spec_dismatch(self):
+ def test_spec_mismatch(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},
@@ -239,7 +239,7 @@ class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.1'
vf_num = utils.get_vf_num_by_pci_address(self.pci_address)
- self.assertEqual(vf_num, '3')
+ self.assertEqual(vf_num, 3)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
@@ -251,3 +251,180 @@ class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
utils.get_vf_num_by_pci_address,
self.pci_address
)
+
+
+class GetProductIDByPfPciAddressTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.pci_address = "0000:0a:00.0"
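+        # A PF reports the product ID of its VFs via the sysfs
+        # sriov_vf_device attribute; the mocks below emulate that file.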
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/sriov_vf_device":
+ mock.mock_open(
+ read_data="101e\n"
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_sriov_vf_device_read(self):
+ product_id = utils.get_vf_product_id_by_pf_addr(self.pci_address)
+ self.assertEqual(product_id, "101e")
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/sriov_vf_device":
+ mock.mock_open(
+ read_data=""
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_sriov_vf_device_read_value_error(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_vf_product_id_by_pf_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/sriov_vf_device":
+ mock.mock_open(
+ mock=mock.MagicMock(side_effect=IOError())
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_sriov_vf_device_read_io_error(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_vf_product_id_by_pf_addr,
+ self.pci_address,
+ )
+
+
+class GetPciIdsByPciAddressTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.pci_address = "0000:0a:00.0"
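+        # The helper reads 0x-prefixed hex IDs from the device's sysfs
+        # vendor and product attributes; both reads are mocked below.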
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor":
+ mock.mock_open(
+ read_data="0x15b3\n"
+ )(),
+ "/sys/bus/pci/devices/0000:0a:00.0/product":
+ mock.mock_open(
+ read_data="0x101e\n"
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids(self):
+ self.assertEqual(
+ utils.get_pci_ids_by_pci_addr(self.pci_address), ("15b3", "101e")
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor": mock.mock_open(
+ read_data=""
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_value_error_vendor(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor":
+ mock.mock_open(
+ read_data="0x15b3\n"
+ )(),
+ "/sys/bus/pci/devices/0000:0a:00.0/product":
+ mock.mock_open(
+ read_data=""
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_value_error_product(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor": mock.mock_open(
+ mock=mock.MagicMock(side_effect=IOError())
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_io_error_vendor(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor":
+ mock.mock_open(
+ read_data="0x15b3\n"
+ )(),
+ "/sys/bus/pci/devices/0000:0a:00.0/product":
+ mock.mock_open(
+ mock=mock.MagicMock(side_effect=IOError())
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_io_error_product(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index aff01d14f6..7490441d92 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -24,6 +24,23 @@ from nova.tests import fixtures
LOG = logging.getLogger(__name__)
+def rule_if_system(system_rule, non_system_rule, context):
+ """Helper function to pick a rule based on system-ness of context.
+
+ This can be used (with functools.partial) to choose between two
+ rule names, based on whether or not the context has system
+    scope. Specifically, if the parent of a nested policy check will
+    fail because of scope_types=['project'], this can be used to choose
+    the parent rule name for the error message check in
+    common_policy_check().
+
+ """
+ if context.system_scope:
+ return system_rule
+ else:
+ return non_system_rule
+
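+# A hypothetical usage sketch: functools.partial(rule_if_system,
+# 'system_rule_name', 'project_rule_name') yields a one-argument callable
+# that common_policy_check() can invoke with the request context to
+# resolve the expected rule name.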
+
class BasePolicyTest(test.TestCase):
# NOTE(gmann): Set this flag to True if you would like to tests the
# new behaviour of policy without deprecated rules.
@@ -36,11 +53,21 @@ class BasePolicyTest(test.TestCase):
# For Example:
# rules_without_deprecation{
# "os_compute_api:os-deferred-delete:restore":
- # "rule:system_admin_or_owner"}
+ # "rule:project_admin_api"}
rules_without_deprecation = {}
def setUp(self):
super(BasePolicyTest, self).setUp()
+        # TODO(gmann): enforce_scope and enforce_new_defaults are enabled
+        # by default in the code, so disable them in the base test class
+        # while we still have deprecated rules and their tests. We have
+        # enforce_scope and no-legacy tests which explicitly enable scope
+        # and the new defaults to test them. In the future, once we remove
+        # the deprecated rules and refactor the unit tests, we can stop
+        # overriding the oslo policy flags.
+ self.flags(enforce_scope=False, group="oslo_policy")
+ if not self.without_deprecated_rules:
+ self.flags(enforce_new_defaults=False, group="oslo_policy")
self.useFixture(fixtures.NeutronFixture(self))
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -95,33 +122,135 @@ class BasePolicyTest(test.TestCase):
project_id=self.project_id_other,
roles=['reader'])
- self.all_contexts = [
+ self.all_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.other_project_member_context,
self.project_foo_context, self.other_project_reader_context
- ]
+ ])
+
+ # All the project contexts for easy access.
+ self.all_project_contexts = set([
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context,
+ ])
+ # All the system contexts for easy access.
+ self.all_system_contexts = set([
+ self.system_admin_context, self.system_foo_context,
+ self.system_member_context, self.system_reader_context,
+ ])
+        # A few common sets of contexts to be used in tests
+ #
+        # With scope disabled and no legacy rules, any admin and project
+        # member have access. No other role in that project will have
+        # access.
+ self.project_member_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ])
+        # With scope enabled and legacy rules, only a project scoped admin
+        # and any role in that project will have access.
+ self.project_m_r_or_admin_with_scope_and_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context
+ ])
+        # With scope enabled and no legacy rules, only a project scoped
+        # admin and project members have access. No other role in that
+        # project or system scoped token will have access.
+ self.project_member_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context
+ ])
+        # With scope disabled and no legacy rules, any admin, project
+        # member, and project reader have access. No other role in that
+        # project will have access.
+ self.project_reader_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context
+ ])
+        # With scope enabled and no legacy rules, only a project scoped
+        # admin, project member, and project reader have access. No other
+        # role in that project or system scoped token will have access.
+ self.project_reader_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context
+ ])
if self.without_deprecated_rules:
# To simulate the new world, remove deprecations by overriding
# rules which has the deprecated rules.
self.rules_without_deprecation.update({
- "system_admin_or_owner":
- "rule:system_admin_api or rule:project_member_api",
- "system_or_project_reader":
- "rule:system_reader_api or rule:project_reader_api",
- "system_admin_api":
- "role:admin and system_scope:all",
- "system_reader_api":
- "role:reader and system_scope:all",
+ "context_is_admin":
+ "role:admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
+ "project_admin_api":
+ "role:admin and project_id:%(project_id)s",
"project_member_api":
"role:member and project_id:%(project_id)s",
+ "project_reader_api":
+ "role:reader and project_id:%(project_id)s",
+ "project_member_or_admin":
+ "rule:project_member_api or rule:context_is_admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
})
self.policy.set_rules(self.rules_without_deprecation,
overwrite=False)
+ def reduce_set(self, name, new_set):
+ """Reduce a named set of contexts in a subclass.
+
+ This removes things from a set in a child test class by taking
+ a new set, but asserts that no *new* contexts are added over
+ what is defined in the parent.
+
+ :param name: The name of a set of contexts on self
+                     (i.e. 'project' for self.project_contexts)
+ :param new_set: The new set of contexts that should be used in
+ the above set. The new_set is asserted to be a
+ perfect subset of the existing set
+                        perfect subset of the existing set.
+ current = getattr(self, '%s_contexts' % name)
+
+ errors = ','.join(x.user_id for x in new_set - current)
+ self.assertEqual('', errors,
+ 'Attempt to reduce set would add %s' % errors)
+
+ LOG.info('%s.%s_contexts: removing %s',
+ self.__class__.__name__,
+ name,
+ ','.join(x.user_id for x in current - new_set))
+ setattr(self, '%s_contexts' % name, new_set)
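+        # Hypothetical example: a scope-enforcing subclass could call
+        # self.reduce_set('project_action_authorized',
+        #                 self.all_project_contexts) to drop the system
+        # contexts from the authorized set.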
+
+ def common_policy_auth(self, authorized_contexts,
+ rule_name,
+ func, req, *arg, **kwarg):
+ """Check a policy rule against a set of authorized contexts.
+
+ This is exactly like common_policy_check, except that it
+ assumes any contexts not in the authorized set are in the
+ unauthorized set.
+ """
+ # The unauthorized users are any not in the authorized set.
+ unauth = list(set(self.all_contexts) - set(authorized_contexts))
+ # In case a set was passed in, convert to list for stable ordering.
+ authorized_contexts = list(authorized_contexts)
+ # Log both sets in the order we will test them to aid debugging of
+ # fatal=False responses.
+ LOG.info('Authorized users: %s', list(
+ x.user_id for x in authorized_contexts))
+ LOG.info('Unauthorized users: %s', list(x.user_id for x in unauth))
+ return self.common_policy_check(authorized_contexts, unauth,
+ rule_name, func, req, *arg, **kwarg)
+
def common_policy_check(self, authorized_contexts,
unauthorized_contexts, rule_name,
func, req, *arg, **kwarg):
@@ -146,15 +275,25 @@ class BasePolicyTest(test.TestCase):
def ensure_raises(req, *args, **kwargs):
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, req, *arg, **kwarg)
+ # NOTE(danms): We may need to check a different rule_name
+ # as the enforced policy, based on the context we are
+ # using. Examples are multi-policy APIs for similar
+ # reasons as below. If we are passed a function for
+ # rule_name, call it with the context being used to
+ # determine the rule_name we should verify.
+ if callable(rule_name):
+ actual_rule_name = rule_name(req.environ['nova.context'])
+ else:
+ actual_rule_name = rule_name
# NOTE(gmann): In case of multi-policy APIs, PolicyNotAuthorized
# exception can be raised from either of the policy so checking
# the error message, which includes the rule name, can mismatch.
# Tests verifying the multi policy can pass rule_name as None
# to skip the error message assert.
- if rule_name is not None:
+ if actual_rule_name is not None:
self.assertEqual(
"Policy doesn't allow %s to be performed." %
- rule_name, exc.format_message())
+ actual_rule_name, exc.format_message())
# Verify all the context having allowed scope and roles pass
# the policy check.
for context in authorized_contexts:
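
The new common_policy_auth helper above derives the unauthorized set as the complement of the authorized set against all_contexts, so tests only declare who should pass. A minimal standalone sketch of that complement logic, assuming a hypothetical Context stand-in for nova's real request-context fixtures:

    # Sketch only: Context is a hypothetical stand-in for the
    # RequestContext fixtures created by BasePolicyTest.
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Context:
        user_id: str

    all_contexts = {Context('legacy_admin'), Context('system_admin'),
                    Context('project_admin'), Context('project_member'),
                    Context('project_reader'), Context('project_foo')}
    authorized = {Context('legacy_admin'), Context('project_admin')}

    # Anyone not explicitly authorized is expected to be denied.
    unauthorized = all_contexts - authorized
    assert authorized <= all_contexts
    print(sorted(c.user_id for c in unauthorized))
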
diff --git a/nova/tests/unit/policies/test_admin_actions.py b/nova/tests/unit/policies/test_admin_actions.py
index c5522616ff..21157fd832 100644
--- a/nova/tests/unit/policies/test_admin_actions.py
+++ b/nova/tests/unit/policies/test_admin_actions.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -40,40 +41,42 @@ class AdminActionsPolicyTest(base.BasePolicyTest):
uuid = uuids.fake_id
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
- id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
- task_state=None, launched_at=timeutils.utcnow())
+ id=1, uuid=uuid, project_id=self.project_id,
+ vm_state=vm_states.ACTIVE, task_state=None,
+ launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to change the service
- self.admin_authorized_contexts = [
+ # By default, legacy rules are enabled and scope checks are disabled,
+ # so system admin, legacy admin, and project admin are able to
+ # perform server admin actions.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to change the service
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.objects.Instance.save')
def test_reset_state_policy(self, mock_save):
rule_name = "os_compute_api:os-admin-actions:reset_state"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._reset_state,
- self.req, self.instance.uuid,
- body={'os-resetState': {'state': 'active'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._reset_state,
+ self.req, self.instance.uuid,
+ body={'os-resetState': {'state': 'active'}})
def test_inject_network_info_policy(self):
rule_name = "os_compute_api:os-admin-actions:inject_network_info"
with mock.patch.object(self.controller.compute_api,
"inject_network_info"):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._inject_network_info,
- self.req, self.instance.uuid, body={})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._inject_network_info,
+ self.req, self.instance.uuid, body={})
+
+
+class AdminActionsNoLegacyNoScopePolicyTest(AdminActionsPolicyTest):
+ """Test Admin Actions APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
@@ -90,27 +93,15 @@ class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
def setUp(self):
super(AdminActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system admin will not be able to
+ # perform server admin actions.
+ self.project_action_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class AdminActionsNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
+class AdminActionsScopeTypeNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
"""Test Admin Actions APIs policies with system scope enabled,
- and no more deprecated rules.
+ and no more deprecated rules, which means scope + new defaults, so
+ only project admin is able to perform admin actions on their server.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(AdminActionsScopeTypePolicyTest, self).setUp()
- # Check that system admin is able to perform the system level actions
- # on server.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to perform the system
- # level actions on server.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
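
The class renames above settle into a 2x2 matrix that every policy test file in this series repeats: legacy rules on or off, crossed with scope enforcement on or off. A schematic sketch of the pattern, with BasePolicyTest stubbed so it stands alone ('Foo' is a placeholder, not a real nova test):

    # Schematic of the recurring 2x2 test matrix; not a copy of any file.
    class BasePolicyTest:
        without_deprecated_rules = False

        def setUp(self):
            self.enforce_scope = False

        def flags(self, enforce_scope, group):
            self.enforce_scope = enforce_scope

    class FooPolicyTest(BasePolicyTest):
        """Defaults: legacy rules enabled, scope checks disabled."""

    class FooNoLegacyNoScopePolicyTest(FooPolicyTest):
        """New defaults only: deprecated rules dropped, scope still off."""
        without_deprecated_rules = True

    class FooScopeTypePolicyTest(FooPolicyTest):
        """Legacy rules kept, but [oslo_policy] enforce_scope turned on."""
        def setUp(self):
            super().setUp()
            self.flags(enforce_scope=True, group="oslo_policy")

    class FooScopeTypeNoLegacyPolicyTest(FooScopeTypePolicyTest):
        """Strictest mode: scope checks plus the new defaults."""
        without_deprecated_rules = True
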
diff --git a/nova/tests/unit/policies/test_admin_password.py b/nova/tests/unit/policies/test_admin_password.py
index b733f83e5d..01cce2950e 100644
--- a/nova/tests/unit/policies/test_admin_password.py
+++ b/nova/tests/unit/policies/test_admin_password.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,28 +48,23 @@ class AdminPasswordPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to change the password
- self.admin_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner: same project id, no role check) are able
+ # to change the password for their server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin is not able to change the password
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.set_admin_password')
def test_change_password_policy(self, mock_password):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- self.rule_name,
- self.controller.change_password,
- self.req, self.instance.uuid,
- body={'changePassword': {
- 'adminPass': '1234pass'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ self.rule_name,
+ self.controller.change_password,
+ self.req, self.instance.uuid,
+ body={'changePassword': {
+ 'adminPass': '1234pass'}})
def test_change_password_overridden_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -93,6 +89,22 @@ class AdminPasswordPolicyTest(base.BasePolicyTest):
mock.ANY, '1234pass')
+class AdminPasswordNoLegacyNoScopePolicyTest(AdminPasswordPolicyTest):
+ """Test Admin Password APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(AdminPasswordNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to change the server password.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
"""Test Admin Password APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -106,31 +118,24 @@ class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
def setUp(self):
super(AdminPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system admin to change the password.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class AdminPasswordNoLegacyPolicyTest(AdminPasswordPolicyTest):
+class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
"""Test Admin Password APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules, which means scope + new defaults, so
+ only project admin and member are able to change their server password.
"""
+
without_deprecated_rules = True
def setUp(self):
- super(AdminPasswordNoLegacyPolicyTest, self).setUp()
+ super(AdminPasswordScopeTypeNoLegacyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system or projct admin or owner is able to change
- # the password.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to change the
- # password.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope checks enabled and no legacy rule, only project
+ # admin/member will be able to change the password for the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
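
The persona sets referenced above ultimately exercise plain oslo.policy check strings such as the ones registered in base.py earlier in this diff. A minimal, self-contained sketch of how one of those strings evaluates against credentials and a target (standard oslo.policy API; the scenario values are illustrative):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.RuleDefault(
        'project_member_api',
        'role:member and project_id:%(project_id)s'))

    creds = {'roles': ['member'], 'project_id': 'p1'}
    # A target in the same project passes; another project's target fails.
    print(enforcer.enforce('project_member_api', {'project_id': 'p1'}, creds))
    print(enforcer.enforce('project_member_api', {'project_id': 'p2'}, creds))
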
diff --git a/nova/tests/unit/policies/test_aggregates.py b/nova/tests/unit/policies/test_aggregates.py
index ce3c00f30b..6ac7b6e010 100644
--- a/nova/tests/unit/policies/test_aggregates.py
+++ b/nova/tests/unit/policies/test_aggregates.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import aggregates
@@ -31,39 +32,19 @@ class AggregatesPolicyTest(base.BasePolicyTest):
super(AggregatesPolicyTest, self).setUp()
self.controller = aggregates.AggregateController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to perform Aggregate Operations
- self.admin_authorized_contexts = [
+ # With legacy rules enabled and scope checks disabled by default,
+ # system admin, legacy admin, and project admin will be able to
+ # perform Aggregate Operations.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to perform Aggregate Operations
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system reader is able to get Aggregate
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get Aggregate
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate_list')
def test_list_aggregate_policy(self, mock_list):
rule_name = "os_compute_api:os-aggregates:index"
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.compute.api.AggregateAPI.create_aggregate')
def test_create_aggregate_policy(self, mock_create):
@@ -74,66 +55,59 @@ class AggregatesPolicyTest(base.BasePolicyTest):
"hosts": ["host1", "host2"]})
body = {"aggregate": {"name": "test",
"availability_zone": "nova1"}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate')
def test_update_aggregate_policy(self, mock_update):
rule_name = "os_compute_api:os-aggregates:update"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, 1,
- body={"aggregate": {"name": "new_name"}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, 1,
+ body={"aggregate": {"name": "new_name"}})
@mock.patch('nova.compute.api.AggregateAPI.delete_aggregate')
def test_delete_aggregate_policy(self, mock_delete):
rule_name = "os_compute_api:os-aggregates:delete"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, 1)
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_show_aggregate_policy(self, mock_show):
rule_name = "os_compute_api:os-aggregates:show"
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, 1)
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate_metadata')
def test_set_metadata_aggregate_policy(self, mock_metadata):
rule_name = "os_compute_api:os-aggregates:set_metadata"
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._set_metadata,
- self.req, 1, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller._set_metadata,
+ self.req, 1, body=body)
@mock.patch('nova.compute.api.AggregateAPI.add_host_to_aggregate')
def test_add_host_aggregate_policy(self, mock_add):
rule_name = "os_compute_api:os-aggregates:add_host"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._add_host,
- self.req, 1,
- body={"add_host": {"host": "host1"}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._add_host,
+ self.req, 1,
+ body={"add_host": {"host": "host1"}})
@mock.patch('nova.compute.api.AggregateAPI.remove_host_from_aggregate')
def test_remove_host_aggregate_policy(self, mock_remove):
rule_name = "os_compute_api:os-aggregates:remove_host"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._remove_host,
- self.req, 1,
- body={"remove_host": {"host": "host1"}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller._remove_host,
+ self.req, 1,
+ body={"remove_host": {"host": "host1"}})
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_images_aggregate_policy(self, mock_get):
@@ -144,10 +118,21 @@ class AggregatesPolicyTest(base.BasePolicyTest):
body = {'cache': [{'id': uuids.fake_id}]}
req = fakes.HTTPRequest.blank('', version='2.81')
with mock.patch('nova.conductor.api.ComputeTaskAPI.cache_images'):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.images,
- req, 1, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.images,
+ req, 1, body=body)
+
+
+class AggregatesNoLegacyNoScopePolicyTest(AggregatesPolicyTest):
+ """Test Aggregates APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to
+ perform Aggregate Operations. Legacy admin is still allowed because
+ the policy is simply 'admin' when scope checks are disabled.
+
+ """
+
+ without_deprecated_rules = True
class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
@@ -164,28 +149,16 @@ class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
super(AggregatesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to perform Aggregate Operations.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to perform
- # Aggregate Operations.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
- # Check that system reader is able to get Aggregate
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get Aggregate
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ # With scope checks enabled, only project-scoped admins are
+ # able to perform Aggregate Operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class AggregatesScopeTypeNoLegacyPolicyTest(AggregatesScopeTypePolicyTest):
+ """Test Aggregates APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only project admin is able to perform Aggregate Operations.
+ """
+
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_assisted_volume_snapshots.py b/nova/tests/unit/policies/test_assisted_volume_snapshots.py
index 3c9836e07b..dce62e5bcc 100644
--- a/nova/tests/unit/policies/test_assisted_volume_snapshots.py
+++ b/nova/tests/unit/policies/test_assisted_volume_snapshots.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import urllib
@@ -32,18 +33,12 @@ class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
super(AssistedVolumeSnapshotPolicyTest, self).setUp()
self.controller = snapshots.AssistedVolumeSnapshotsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to take volume snapshot.
- self.admin_authorized_contexts = [
+ # By default, legacy rules are enabled and scope checks are disabled,
+ # so system admin, legacy admin, and project admin are able to
+ # take volume snapshots.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to take volume snapshot.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.compute.api.API.volume_snapshot_create')
def test_assisted_create_policy(self, mock_create):
@@ -52,10 +47,9 @@ class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.API.volume_snapshot_delete')
def test_assisted_delete_policy(self, mock_delete):
@@ -64,11 +58,20 @@ class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
req = fakes.HTTPRequest.blank('?%s' % urllib.parse.urlencode(params))
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ req, 1)
+
+
+class AssistedSnapshotNoLegacyNoScopePolicyTest(
+ AssistedVolumeSnapshotPolicyTest):
+ """Test Assisted Snapshot APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
class AssistedSnapshotScopeTypePolicyTest(AssistedVolumeSnapshotPolicyTest):
@@ -85,16 +88,15 @@ class AssistedSnapshotScopeTypePolicyTest(AssistedVolumeSnapshotPolicyTest):
super(AssistedSnapshotScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to take volume snapshot.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to take volume
- # snapshot.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ # With scope check enabled, system admin is not able to
+ # take volume snapshot.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+
+class AssistedSnapshotScopeTypeNoLegacyPolicyTest(
+ AssistedSnapshotScopeTypePolicyTest):
+ """Test os-volume-attachments APIs policies with system scope enabled,
+ and no legacy deprecated rules.
+ """
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_attach_interfaces.py b/nova/tests/unit/policies/test_attach_interfaces.py
index 05f62d5cf0..33c531c9c7 100644
--- a/nova/tests/unit/policies/test_attach_interfaces.py
+++ b/nova/tests/unit/policies/test_attach_interfaces.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,40 +48,25 @@ class AttachInterfacesPolicyTest(base.BasePolicyTest):
vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- self.admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_foo_context,
- self.project_reader_context, self.project_member_context
- ]
-
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- self.reader_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner: same project id, no role check) are able
+ # to attach or detach an interface from a server.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # and they can list their own server's attached interfaces.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.network.neutron.API.list_ports')
def test_index_interfaces_policy(self, mock_port, mock_get):
rule_name = "os_compute_api:os-attach-interfaces:list"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, uuids.fake_id)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.network.neutron.API.show_port')
@@ -97,11 +83,10 @@ class AttachInterfacesPolicyTest(base.BasePolicyTest):
"fixed_ips": ["10.0.2.2"],
"device_id": server_id,
}}
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, server_id, port_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, server_id, port_id)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.api.openstack.compute.attach_interfaces'
@@ -110,19 +95,43 @@ class AttachInterfacesPolicyTest(base.BasePolicyTest):
def test_attach_interface(self, mock_interface, mock_port, mock_get):
rule_name = "os_compute_api:os-attach-interfaces:create"
body = {'interfaceAttachment': {'net_id': uuids.fake_id}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, uuids.fake_id, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, uuids.fake_id, body=body)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.compute.api.API.detach_interface')
def test_delete_interface(self, mock_detach, mock_get):
rule_name = "os_compute_api:os-attach-interfaces:delete"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, uuids.fake_id, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, uuids.fake_id, uuids.fake_id)
+
+
+class AttachInterfacesNoLegacyNoScopePolicyTest(AttachInterfacesPolicyTest):
+ """Test Attach Interfaces APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ ai_policies.POLICY_ROOT % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ai_policies.POLICY_ROOT % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ai_policies.POLICY_ROOT % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ ai_policies.POLICY_ROOT % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(AttachInterfacesNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, legacy admin loses power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
@@ -138,6 +147,11 @@ class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
def setUp(self):
super(AttachInterfacesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@@ -173,12 +187,12 @@ class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@mock.patch('nova.network.neutron.API.list_ports')
def test_deprecated_policy_overridden_rule_is_checked(self, mock_port,
mock_get):
- # Test to verify if deprecatd overridden policy is working.
+ # Test to verify if deprecated overridden policy is working.
# check for success as admin role. Deprecated rule
# has been overridden with admin checks in policy.yaml
# If admin role pass it means overridden rule is enforced by
- # olso.policy because new default is system or project reader and the
+ # oslo.policy because new default is system or project reader and the
# old default is admin.
self.controller.index(self.admin_req, uuids.fake_id)
@@ -192,55 +206,27 @@ class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
exc.format_message())
-class AttachInterfacesNoLegacyPolicyTest(AttachInterfacesPolicyTest):
+class AttachInterfacesScopeTypeNoLegacyPolicyTest(
+ AttachInterfacesScopeTypePolicyTest):
"""Test Attach Interfaces APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(AttachInterfacesNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to
- # create or delete interfaces.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
- # create or delete interfaces.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that system reader or projct is able to
- # create or delete interfaces.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to
- # create or delete interfaces.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(AttachInterfacesScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope checks enabled, only project admin,
+ # member, and reader will be allowed to operate on server interfaces.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
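
Both attach-interfaces variants above override rules_without_deprecation using %-formatted policy names. A small self-contained sketch of that pattern; POLICY_ROOT here is inferred from the rule_name strings in the tests above, and the check strings are the expanded forms from base.py (in nova they are referenced via named rule constants):

    # Illustrative stand-ins, not imports from nova.
    POLICY_ROOT = 'os_compute_api:os-attach-interfaces:%s'
    PROJECT_READER_OR_ADMIN = (
        'rule:project_reader_api or rule:context_is_admin')
    PROJECT_MEMBER_OR_ADMIN = (
        'rule:project_member_api or rule:context_is_admin')

    rules_without_deprecation = {
        POLICY_ROOT % 'list': PROJECT_READER_OR_ADMIN,
        POLICY_ROOT % 'show': PROJECT_READER_OR_ADMIN,
        POLICY_ROOT % 'create': PROJECT_MEMBER_OR_ADMIN,
        POLICY_ROOT % 'delete': PROJECT_MEMBER_OR_ADMIN,
    }
    # BasePolicyTest then layers these over the defaults, roughly:
    #     self.policy.set_rules(rules_without_deprecation, overwrite=False)
    print(rules_without_deprecation[POLICY_ROOT % 'list'])
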
diff --git a/nova/tests/unit/policies/test_availability_zone.py b/nova/tests/unit/policies/test_availability_zone.py
index 454692fde8..1852f8444c 100644
--- a/nova/tests/unit/policies/test_availability_zone.py
+++ b/nova/tests/unit/policies/test_availability_zone.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import availability_zone
from nova.tests.unit.api.openstack import fakes
@@ -31,49 +31,38 @@ class AvailabilityZonePolicyTest(base.BasePolicyTest):
self.controller = availability_zone.AvailabilityZoneController()
self.req = fakes.HTTPRequest.blank('')
- # Check that everyone is able to list the AZ
- self.everyone_authorized_contexts = [
+ # With legacy rules enabled and scope checks disabled by default,
+ # system admin, legacy admin, and project admin will be able to get
+ # AZ with host information.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_member_context, self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = []
-
- # Check that system reader is able to list the AZ Detail
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to list the AZ. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
self.project_admin_context]
- # Check that non-system-reader are not able to list the AZ.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
+ self.project_authorized_contexts = self.all_contexts
@mock.patch('nova.objects.Instance.save')
def test_availability_zone_list_policy(self, mock_save):
rule_name = "os_compute_api:os-availability-zone:list"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_availability_zone_detail_policy(self):
rule_name = "os_compute_api:os-availability-zone:detail"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.detail,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.detail,
+ self.req)
+
+
+class AvailabilityZoneNoLegacyNoScopePolicyTest(AvailabilityZonePolicyTest):
+ """Test Availability Zones APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to get
+ AZ with host information. Legacy admin is still allowed because
+ the policy is simply 'admin' when scope checks are disabled.
+
+ """
+
+ without_deprecated_rules = True
class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest):
@@ -91,15 +80,17 @@ class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest):
super(AvailabilityZoneScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to list the AZ.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to list AZ.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ # With scope checks enabled, only project-scoped admins are
+ # able to get AZ with host information.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+ self.project_authorized_contexts = self.all_project_contexts
+
+
+class AZScopeTypeNoLegacyPolicyTest(AvailabilityZoneScopeTypePolicyTest):
+ """Test Availability Zones APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only project admin is able to get AZ with host information.
+ """
+
+ without_deprecated_rules = True
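
The switch from self.all_contexts to self.all_project_contexts under scope enforcement is plain set filtering: system-scoped personas drop out once only project-scoped tokens are accepted. A toy model of that reduction (persona names mirror the fixture attributes; the scope tags are a simplification of the real RequestContext objects):

    # Toy model only; real contexts are RequestContext fixtures.
    all_contexts = {
        ('legacy_admin', 'project'), ('project_admin', 'project'),
        ('project_member', 'project'), ('project_reader', 'project'),
        ('system_admin', 'system'), ('system_reader', 'system'),
    }
    all_project_contexts = {c for c in all_contexts if c[1] == 'project'}

    # With [oslo_policy] enforce_scope = True, only these remain eligible
    # for a project-scoped rule such as os-availability-zone:list.
    print(sorted(name for name, scope in all_project_contexts))
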
diff --git a/nova/tests/unit/policies/test_baremetal_nodes.py b/nova/tests/unit/policies/test_baremetal_nodes.py
index 77e6def26d..68f02087c4 100644
--- a/nova/tests/unit/policies/test_baremetal_nodes.py
+++ b/nova/tests/unit/policies/test_baremetal_nodes.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import baremetal_nodes
@@ -40,25 +41,17 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
self.stub_out('nova.api.openstack.compute.'
'baremetal_nodes._get_ironic_client',
lambda *_: FAKE_IRONIC_CLIENT)
- # Check that system reader is able to get baremetal nodes.
- self.system_reader_authorized_contexts = [
+ # With legacy rules enabled and scope checks disabled by default,
+ # system admin, legacy admin, and project admin can get baremetal nodes.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to get baremetal nodes.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
+ self.project_admin_context]
def test_index_nodes_policy(self):
rule_name = "os_compute_api:os-baremetal-nodes:list"
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
@@ -69,11 +62,22 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
mock_get.return_value = node
mock_port.return_value = []
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, uuids.fake_id)
+
+
+class BaremetalNodesNoLegacyNoScopePolicyTest(BaremetalNodesPolicyTest):
+ """Test Baremetal Nodes APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In that case
+ system admin, legacy admin, and project admin will be able to get
+ baremetal nodes. Legacy admin is still allowed because the policy is
+ simply 'admin' when scope checks are disabled.
+
+ """
+
+ without_deprecated_rules = True
class BaremetalNodesScopeTypePolicyTest(BaremetalNodesPolicyTest):
@@ -91,28 +95,21 @@ class BaremetalNodesScopeTypePolicyTest(BaremetalNodesPolicyTest):
super(BaremetalNodesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get baremetal nodes.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system or non-reader is not able to get
- # baremetal nodes.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
-
-
-class BaremetalNodesNoLegacyPolicyTest(BaremetalNodesScopeTypePolicyTest):
- """Test Baremetal Nodes APIs policies with system scope enabled,
- and no more deprecated rules.
+ # With scope checks enabled, only project-scoped admins are
+ # able to get baremetal nodes.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class BNScopeTypeNoLegacyPolicyTest(BaremetalNodesScopeTypePolicyTest):
+ """Test Baremetal Nodes APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only project admin is able to get baremetal nodes.
"""
+
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
policies.BASE_POLICY_NAME % 'show':
- base_policy.SYSTEM_READER}
+ base_policy.ADMIN}
diff --git a/nova/tests/unit/policies/test_console_auth_tokens.py b/nova/tests/unit/policies/test_console_auth_tokens.py
index 27dbd59540..a658816538 100644
--- a/nova/tests/unit/policies/test_console_auth_tokens.py
+++ b/nova/tests/unit/policies/test_console_auth_tokens.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
@@ -31,33 +31,29 @@ class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
- # Check that system reader is able to get console connection
- # information.
+ # With legacy rule, any admin can get console connection information.
# NOTE(gmann): Until old default rule which is admin_api is
# deprecated and not removed, project admin and legacy admin
# will be able to get console. This makes sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
+ # tokens will keep working.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to get console connection
- # information.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, fakes.FAKE_UUID)
+
+
+class ConsoleAuthTokensNoLegacyNoScopeTest(ConsoleAuthTokensPolicyTest):
+ """Test Console Auth Tokens API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
@@ -75,17 +71,14 @@ class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get console connection
- # information.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to get console connection
- # information.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+
+class ConsoleAuthTokensScopeTypeNoLegacyPolicyTest(
+ ConsoleAuthTokensScopeTypePolicyTest):
+ """Test Console Auth Tokens APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_console_output.py b/nova/tests/unit/policies/test_console_output.py
index 3c16f5c1fa..c1bccf1d55 100644
--- a/nova/tests/unit/policies/test_console_output.py
+++ b/nova/tests/unit/policies/test_console_output.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -43,30 +44,37 @@ class ConsoleOutputPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to get the server console.
- self.admin_authorized_contexts = [
+ # With legacy rule, any admin and any role in the project
+ # can get the server console.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to get the server
- # console.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.get_console_output')
def test_console_output_policy(self, mock_console):
mock_console.return_value = '\n'.join([str(i) for i in range(2)])
rule_name = "os_compute_api:os-console-output"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.get_console_output,
- self.req, self.instance.uuid,
- body={'os-getConsoleOutput': {}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.get_console_output,
+ self.req, self.instance.uuid,
+ body={'os-getConsoleOutput': {}})
+
+
+class ConsoleOutputNoLegacyNoScopePolicyTest(ConsoleOutputPolicyTest):
+ """Test Server Console Output APIs policies with no legacy deprecated
+ rules and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ConsoleOutputNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member is able to
+ # get the server console.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
@@ -83,31 +91,22 @@ class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
def setUp(self):
super(ConsoleOutputScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system admin.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ConsoleOutputNoLegacyPolicyTest(ConsoleOutputPolicyTest):
+class ConsoleOutputScopeTypeNoLegacyPolicyTest(
+ ConsoleOutputScopeTypePolicyTest):
"""Test Console Output APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(ConsoleOutputNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
+ super(ConsoleOutputScopeTypeNoLegacyPolicyTest, self).setUp()
- # Check that system or projct admin or owner is able to
- # get the server console.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
+ # With scope checks enabled and no legacy rule, only project admin/member can
# get the server console.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_create_backup.py b/nova/tests/unit/policies/test_create_backup.py
index 4985119201..b54ed366df 100644
--- a/nova/tests/unit/policies/test_create_backup.py
+++ b/nova/tests/unit/policies/test_create_backup.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -43,20 +44,14 @@ class CreateBackupPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to create server backup.
- self.admin_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner: same project id, no role check) are able
+ # to create a server backup.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to create server
- # backup.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.backup')
def test_create_backup_policy(self, mock_backup):
@@ -68,11 +63,26 @@ class CreateBackupPolicyTest(base.BasePolicyTest):
'rotation': 1,
},
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._create_backup,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller._create_backup,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class CreateBackupNoLegacyNoScopePolicyTest(CreateBackupPolicyTest):
+ """Test Create Backup server APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(CreateBackupNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to create the server backup.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
@@ -89,31 +99,20 @@ class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
def setUp(self):
super(CreateBackupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system users to create a server backup.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class CreateBackupNoLegacyPolicyTest(CreateBackupPolicyTest):
+class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
"""Test Create Backup APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(CreateBackupNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to create
- # server backup.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
- # create server backup.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(CreateBackupScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only project
+ # admin/member will be able to create the server backup.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_deferred_delete.py b/nova/tests/unit/policies/test_deferred_delete.py
index ca2253df54..08bb0213f4 100644
--- a/nova/tests/unit/policies/test_deferred_delete.py
+++ b/nova/tests/unit/policies/test_deferred_delete.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,37 +48,29 @@ class DeferredDeletePolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to force delete or restore server.
- self.admin_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner: same project id, no role check) are able
+ # to force delete or restore a server.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to force delete or
- # restore server.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.restore')
def test_restore_server_policy(self, mock_restore):
rule_name = dd_policies.BASE_POLICY_NAME % 'restore'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._restore,
- self.req, self.instance.uuid,
- body={'restore': {}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller._restore,
+ self.req, self.instance.uuid,
+ body={'restore': {}})
def test_force_delete_server_policy(self):
rule_name = dd_policies.BASE_POLICY_NAME % 'force'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._force_delete,
- self.req, self.instance.uuid,
- body={'forceDelete': {}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller._force_delete,
+ self.req, self.instance.uuid,
+ body={'forceDelete': {}})
def test_force_delete_server_policy_failed_with_other_user(self):
rule_name = dd_policies.BASE_POLICY_NAME % 'force'
@@ -103,6 +96,27 @@ class DeferredDeletePolicyTest(base.BasePolicyTest):
self.req.environ['nova.context'], self.instance)
+class DeferredDeleteNoLegacyNoScopePolicyTest(DeferredDeletePolicyTest):
+ """Test Deferred Delete server APIs policies with no legacy deprecated
+ rules and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ dd_policies.BASE_POLICY_NAME % 'restore':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ dd_policies.BASE_POLICY_NAME % 'force':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(DeferredDeleteNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member is able to force
+ # delete or restore server.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
"""Test Deferred Delete APIs policies with system scope enabled.
@@ -117,36 +131,27 @@ class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
def setUp(self):
super(DeferredDeleteScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system admin.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class DeferredDeleteNoLegacyPolicyTest(DeferredDeletePolicyTest):
+class DeferredDeleteScopeTypeNoLegacyPolicyTest(
+ DeferredDeleteScopeTypePolicyTest):
"""Test Deferred Delete APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(DeferredDeleteNoLegacyPolicyTest, self).setUp()
+ super(DeferredDeleteScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to
- # force delete or restore server.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
- # force delete or restore server.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled and no legacy rules, only project admin/member is
+ # able to force delete or restore the server.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index 203cc136e9..b9e4c29dba 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -55,18 +56,12 @@ class EvacuatePolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to evacuate the server
- self.admin_authorized_contexts = [
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # System admin, legacy admin, and project admin are able to evacuate
+ # the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to evacuate the server
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.compute.api.API.evacuate')
def test_evacuate_policy(self, mock_evacuate):
@@ -75,11 +70,10 @@ class EvacuatePolicyTest(base.BasePolicyTest):
'onSharedStorage': 'False',
'adminPass': 'admin_pass'}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._evacuate,
- self.req, uuids.fake_id,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._evacuate,
+ self.req, uuids.fake_id,
+ body=body)
def test_evacuate_policy_failed_with_other_user(self):
rule_name = "os_compute_api:os-evacuate"
@@ -109,7 +103,16 @@ class EvacuatePolicyTest(base.BasePolicyTest):
evacuate_mock.assert_called_once_with(
self.user_req.environ['nova.context'],
mock.ANY, 'my-host', False,
- 'MyNewPass', None)
+ 'MyNewPass', None, None)
+
+
+class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
+ """Test Evacuate APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
@@ -126,28 +129,14 @@ class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
def setUp(self):
super(EvacuateScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system admin will not be able to
+ # evacuate the server.
+ self.project_action_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class EvacuateNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
+class EvacuateScopeTypeNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
"""Test Evacuate APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+ and no more deprecated rules, which means scope + new defaults.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(EvacuateNoLegacyPolicyTest, self).setUp()
-
- # Check that system admin is able to evacuate server.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to evacuate
- # server.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
diff --git a/nova/tests/unit/policies/test_extensions.py b/nova/tests/unit/policies/test_extensions.py
index 35f451087d..d2e3c6adde 100644
--- a/nova/tests/unit/policies/test_extensions.py
+++ b/nova/tests/unit/policies/test_extensions.py
@@ -71,11 +71,20 @@ class ExtensionsScopeTypePolicyTest(ExtensionsPolicyTest):
def setUp(self):
super(ExtensionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context]
class ExtensionsNoLegacyPolicyTest(ExtensionsScopeTypePolicyTest):
"""Test Extensions APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_flavor_access.py b/nova/tests/unit/policies/test_flavor_access.py
index 46d9368c4b..cfdbbd2470 100644
--- a/nova/tests/unit/policies/test_flavor_access.py
+++ b/nova/tests/unit/policies/test_flavor_access.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_access
@@ -49,62 +50,61 @@ class FlavorAccessPolicyTest(base.BasePolicyTest):
self.stub_out('nova.objects.flavor._get_projects_from_db',
lambda context, flavorid: [])
- # Check that admin is able to add/remove flavor access
- # to a tenant.
+ # With legacy rules and no scope checks, all admins are able to
+ # add/remove flavor access to a tenant.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to add/remove flavor access
- # to a tenant.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that everyone is able to list flavor access
- # information which is nothing but bug#1867840.
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- self.reader_unauthorized_contexts = [
- ]
+ # With legacy rules, anyone can access flavor access info.
+ self.admin_index_authorized_contexts = self.all_contexts
def test_list_flavor_access_policy(self):
rule_name = fa_policy.BASE_POLICY_NAME
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller_index.index,
- self.req, '1')
+ self.common_policy_auth(self.admin_index_authorized_contexts,
+ rule_name, self.controller_index.index,
+ self.req, '1')
@mock.patch('nova.objects.Flavor.add_access')
def test_add_tenant_access_policy(self, mock_add):
rule_name = fa_policy.POLICY_ROOT % "add_tenant_access"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._add_tenant_access,
- self.req, '1',
- body={'addTenantAccess': {'tenant': 't1'}})
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller._add_tenant_access,
+ self.req, '1',
+ body={'addTenantAccess': {'tenant': 't1'}})
@mock.patch('nova.objects.Flavor.remove_access')
def test_remove_tenant_access_policy(self, mock_remove):
rule_name = fa_policy.POLICY_ROOT % "remove_tenant_access"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._remove_tenant_access,
- self.req, '1',
- body={'removeTenantAccess': {'tenant': 't1'}})
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller._remove_tenant_access,
+ self.req, '1',
+ body={'removeTenantAccess': {'tenant': 't1'}})
+
+
+class FlavorAccessNoLegacyNoScopeTest(FlavorAccessPolicyTest):
+ """Test Flavor Access API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ fa_policy.POLICY_ROOT % "add_tenant_access":
+ base_policy.ADMIN,
+ fa_policy.POLICY_ROOT % "remove_tenant_access":
+ base_policy.ADMIN,
+ fa_policy.BASE_POLICY_NAME:
+ base_policy.ADMIN}
+
+ def setUp(self):
+ super(FlavorAccessNoLegacyNoScopeTest, self).setUp()
+
+ # With no legacy rules, all admins are able to list access info.
+ self.admin_index_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
@@ -122,81 +122,32 @@ class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
super(FlavorAccessScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to add/remove flavor access
- # to a tenant.
+ # Scope checks remove system users' power.
self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system-admin is not able to add/remove flavor access
- # to a tenant.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system user is able to list flavor access
- # information.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context]
- # Check that non-system is not able to list flavor access
- # information.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.other_project_member_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
-
-
-class FlavorAccessNoLegacyPolicyTest(FlavorAccessPolicyTest):
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.all_project_contexts
+
+
+class FlavorAccessScopeTypeNoLegacyPolicyTest(FlavorAccessScopeTypePolicyTest):
"""Test FlavorAccess APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_redear APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
fa_policy.POLICY_ROOT % "add_tenant_access":
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
fa_policy.POLICY_ROOT % "remove_tenant_access":
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
fa_policy.BASE_POLICY_NAME:
- base_policy.SYSTEM_READER}
+ base_policy.ADMIN}
def setUp(self):
- super(FlavorAccessNoLegacyPolicyTest, self).setUp()
+ super(FlavorAccessScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to add/remove flavor access
- # to a tenant.
+ # New defaults make this admin-only
self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system-admin is not able to add/remove flavor access
- # to a tenant.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system reader is able to list flavor access
- # information.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-system-reader is not able to list flavor access
- # information.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.other_project_member_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context,
- self.other_project_reader_context,
- ]
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.admin_authorized_contexts
diff --git a/nova/tests/unit/policies/test_flavor_extra_specs.py b/nova/tests/unit/policies/test_flavor_extra_specs.py
index 3129cb6213..f3c8cacd57 100644
--- a/nova/tests/unit/policies/test_flavor_extra_specs.py
+++ b/nova/tests/unit/policies/test_flavor_extra_specs.py
@@ -10,22 +10,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
from nova.api.openstack.compute import flavors
from nova.api.openstack.compute import flavors_extraspecs
-from nova.api.openstack.compute import servers
-from nova.compute import vm_states
-from nova import objects
from nova.policies import flavor_extra_specs as policies
from nova.policies import flavor_manage as fm_policies
-from nova.policies import servers as s_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_flavor
-from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
@@ -42,30 +37,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
self.controller = flavors_extraspecs.FlavorExtraSpecsController()
self.flavor_ctrl = flavors.FlavorsController()
self.fm_ctrl = flavor_manage.FlavorManageController()
- self.server_ctrl = servers.ServersController()
self.req = fakes.HTTPRequest.blank('')
- self.server_ctrl._view_builder._add_security_grps = mock.MagicMock()
- self.server_ctrl._view_builder._get_metadata = mock.MagicMock()
- self.server_ctrl._view_builder._get_addresses = mock.MagicMock()
- self.server_ctrl._view_builder._get_host_id = mock.MagicMock()
- self.server_ctrl._view_builder._get_fault = mock.MagicMock()
- self.server_ctrl._view_builder._add_host_status = mock.MagicMock()
-
- self.instance = fake_instance.fake_instance_obj(
- self.project_member_context,
- id=1, uuid=uuids.fake_id, project_id=self.project_id,
- vm_state=vm_states.ACTIVE)
-
- self.mock_get = self.useFixture(
- fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
- self.mock_get.return_value = self.instance
-
- fakes.stub_out_secgroup_api(
- self, security_groups=[{'name': 'default'}])
- self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
- self.server_ctrl.compute_api, 'get_all')).mock
- self.mock_get_all.return_value = objects.InstanceList(
- objects=[self.instance])
def get_flavor_extra_specs(context, flavor_id):
return fake_flavor.fake_flavor_obj(
@@ -77,99 +49,72 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
self.stub_out('nova.api.openstack.common.get_flavor',
get_flavor_extra_specs)
- # Check that all are able to get flavor extra specs.
- self.all_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.all_unauthorized_contexts = []
- # Check that all system scoped are able to get flavor extra specs.
- self.all_system_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.all_system_unauthorized_contexts = []
-
- # Check that admin is able to create, update and delete flavor
- # extra specs.
- self.admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to create, update and
- # delete flavor extra specs.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # In the base/legacy case, all project and system contexts are
+ # authorized in the "anyone" case.
+ self.all_authorized_contexts = (self.all_project_contexts |
+ self.all_system_contexts)
+
+ # In the base/legacy case, all project and system contexts are
+ # authorized in the case of things that distinguish between
+ # scopes, since scope checking is disabled.
+ self.all_project_authorized_contexts = (self.all_project_contexts |
+ self.all_system_contexts)
+
+ # In the base/legacy case, any admin is an admin.
+ self.admin_authorized_contexts = set([self.project_admin_context,
+ self.system_admin_context,
+ self.legacy_admin_context])
@mock.patch('nova.objects.Flavor.save')
def test_create_flavor_extra_specs_policy(self, mock_save):
body = {'extra_specs': {'hw:numa_nodes': '1'}}
rule_name = policies.POLICY_ROOT % 'create'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, '1234',
- body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, '1234',
+ body=body)
@mock.patch('nova.objects.Flavor._flavor_extra_specs_del')
@mock.patch('nova.objects.Flavor.save')
def test_delete_flavor_extra_specs_policy(self, mock_save, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, '1234', 'hw:cpu_policy')
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, '1234', 'hw:cpu_policy')
@mock.patch('nova.objects.Flavor.save')
def test_update_flavor_extra_specs_policy(self, mock_save):
body = {'hw:cpu_policy': 'shared'}
rule_name = policies.POLICY_ROOT % 'update'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, '1234', 'hw:cpu_policy',
- body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, '1234', 'hw:cpu_policy',
+ body=body)
def test_show_flavor_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, '1234',
- 'hw:cpu_policy')
+ self.common_policy_auth(self.all_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, '1234',
+ 'hw:cpu_policy')
def test_index_flavor_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, '1234')
+ self.common_policy_auth(self.all_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, '1234')
def test_flavor_detail_with_extra_specs_policy(self):
fakes.stub_out_flavor_get_all(self)
rule_name = policies.POLICY_ROOT % 'index'
req = fakes.HTTPRequest.blank('', version='2.61')
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts, self.all_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_authorized_contexts,
rule_name, self.flavor_ctrl.detail, req,
fatal=False)
for resp in authorize_res:
@@ -181,8 +126,8 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
fakes.stub_out_flavor_get_by_flavor_id(self)
rule_name = policies.POLICY_ROOT % 'index'
req = fakes.HTTPRequest.blank('', version='2.61')
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts, self.all_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_authorized_contexts,
rule_name, self.flavor_ctrl.show, req, '1',
fatal=False)
for resp in authorize_res:
@@ -221,9 +166,8 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
"disk": 1,
}
}
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_system_authorized_contexts,
- self.all_system_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._create, req, body=body,
fatal=False)
for resp in authorize_res:
@@ -242,9 +186,8 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.61')
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_system_authorized_contexts,
- self.all_system_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._update, req, '1',
body={'flavor': {'description': None}},
fatal=False)
@@ -253,88 +196,6 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['flavor'])
- def test_server_detail_with_extra_specs_policy(self):
- rule = s_policies.SERVERS % 'detail'
- # server 'detail' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts, self.all_unauthorized_contexts,
- rule_name, self.server_ctrl.detail, req,
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp['servers'][0]['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
-
- @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
- rule = s_policies.SERVERS % 'show'
- # server 'show' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name, self.server_ctrl.show, req, 'fake',
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp['server']['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp['server']['flavor'])
-
- @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- @mock.patch('nova.compute.api.API.rebuild')
- def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
- mock_get, mock_bdm):
- rule = s_policies.SERVERS % 'rebuild'
- # server 'rebuild' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name, self.server_ctrl._action_rebuild,
- req, self.instance.uuid,
- body={'rebuild': {"imageRef": uuids.fake_id}},
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp.obj['server']['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
-
- @mock.patch('nova.compute.api.API.update_instance')
- def test_server_update_with_extra_specs_policy(self, mock_update):
- rule = s_policies.SERVERS % 'update'
- # server 'update' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name, self.server_ctrl.update,
- req, self.instance.uuid,
- body={'server': {'name': 'test'}},
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp['server']['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp['server']['flavor'])
-
class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
"""Test Flavor Extra Specs APIs policies with system scope enabled.
@@ -350,65 +211,53 @@ class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
super(FlavorExtraSpecsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that all system scoped are able to get flavor extra specs.
- self.all_system_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context
- ]
- self.all_system_unauthorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system admin is able to create, update and delete flavor
- # extra specs.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system admin is not able to create, update and
- # delete flavor extra specs.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Only project users are authorized
+ self.reduce_set('all_project_authorized', self.all_project_contexts)
+ self.reduce_set('all_authorized', self.all_project_contexts)
+
+ # Only admins can do admin things
+ self.admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class FlavorExtraSpecsNoLegacyNoScopeTest(FlavorExtraSpecsPolicyTest):
+ """Test Flavor Extra Specs API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(FlavorExtraSpecsNoLegacyNoScopeTest, self).setUp()
+
+ # Disabling legacy rules means that random roles no longer
+ # have power, but without scope checking there is no
+ # difference between project and system
+ everything_but_foo = (
+ self.all_project_contexts | self.all_system_contexts) - set([
+ self.system_foo_context,
+ self.project_foo_context,
+ ])
+ self.reduce_set('all_project_authorized', everything_but_foo)
+ self.reduce_set('all_authorized', everything_but_foo)
class FlavorExtraSpecsNoLegacyPolicyTest(FlavorExtraSpecsScopeTypePolicyTest):
"""Test Flavor Extra Specs APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
super(FlavorExtraSpecsNoLegacyPolicyTest, self).setUp()
- # Check that system or project reader are able to get flavor
- # extra specs.
- self.all_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.all_unauthorized_contexts = [
- self.project_foo_context, self.system_foo_context
- ]
- # Check that all system scoped reader are able to get flavor
- # extra specs.
- self.all_system_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- self.all_system_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Non-legacy rules do not imply random roles have any
+ # access. The same note as above applies regarding the
+ # other_project_* contexts. With scope checking enabled,
+ # project and system contexts stay separate.
+ self.reduce_set(
+ 'all_project_authorized',
+ self.all_project_contexts - set([self.project_foo_context]))
+ everything_but_foo_and_system = (
+ self.all_contexts - set([
+ self.project_foo_context,
+ ]) - self.all_system_contexts)
+ self.reduce_set('all_authorized', everything_but_foo_and_system)
diff --git a/nova/tests/unit/policies/test_flavor_manage.py b/nova/tests/unit/policies/test_flavor_manage.py
index 8a890a85af..0663a689cb 100644
--- a/nova/tests/unit/policies/test_flavor_manage.py
+++ b/nova/tests/unit/policies/test_flavor_manage.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
@@ -31,18 +32,11 @@ class FlavorManagePolicyTest(base.BasePolicyTest):
super(FlavorManagePolicyTest, self).setUp()
self.controller = flavor_manage.FlavorManageController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to manage the flavors.
+ # With legacy rules and no scope checks, all admins can manage
+ # the flavors.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to manage the flavors.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
def test_create_flavor_policy(self):
rule_name = fm_policies.POLICY_ROOT % 'create'
@@ -67,29 +61,34 @@ class FlavorManagePolicyTest(base.BasePolicyTest):
"disk": 1,
}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._create,
- self.req, body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller._create,
+ self.req, body=body)
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
@mock.patch('nova.objects.Flavor.save')
def test_update_flavor_policy(self, mock_save, mock_get):
rule_name = fm_policies.POLICY_ROOT % 'update'
req = fakes.HTTPRequest.blank('', version='2.55')
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._update,
- req, uuids.fake_id,
- body={'flavor': {'description': None}})
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller._update,
+ req, uuids.fake_id,
+ body={'flavor': {'description': None}})
@mock.patch('nova.objects.Flavor.destroy')
def test_delete_flavor_policy(self, mock_delete):
rule_name = fm_policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller._delete,
+ self.req, uuids.fake_id)
+
+
+class FlavorManageNoLegacyNoScopeTest(FlavorManagePolicyTest):
+ """Test Flavor Access API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest):
@@ -106,23 +105,16 @@ class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest):
super(FlavorManageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to manage the flavors.
+ # With scope enabled, only legacy admin and project admin are able
+ # to manage the flavors.
self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system-admin is not able to manage the flavors.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class FlavorManageNoLegacyPolicyTest(FlavorManageScopeTypePolicyTest):
+ self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class FlavorManageScopeTypeNoLegacyPolicyTest(
+ FlavorManageScopeTypePolicyTest):
"""Test Flavor Manage APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_floating_ip_pools.py b/nova/tests/unit/policies/test_floating_ip_pools.py
index 08f36134d5..551f482bd4 100644
--- a/nova/tests/unit/policies/test_floating_ip_pools.py
+++ b/nova/tests/unit/policies/test_floating_ip_pools.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import floating_ip_pools
from nova.tests.unit.api.openstack import fakes
@@ -32,15 +32,15 @@ class FloatingIPPoolsPolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
# Check that everyone is able to list FIP pools.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
self.other_project_member_context,
self.system_member_context, self.system_reader_context,
- self.system_foo_context]
- self.everyone_unauthorized_contexts = []
+ self.system_foo_context])
+ self.everyone_unauthorized_contexts = set([])
@mock.patch('nova.network.neutron.API.get_floating_ip_pools')
def test_floating_ip_pools_policy(self, mock_get):
@@ -66,6 +66,10 @@ class FloatingIPPoolsScopeTypePolicyTest(FloatingIPPoolsPolicyTest):
super(FloatingIPPoolsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.everyone_unauthorized_contexts = (
+ self.all_contexts - self.everyone_authorized_contexts)
+
class FloatingIPPoolsNoLegacyPolicyTest(FloatingIPPoolsScopeTypePolicyTest):
"""Test Floating IP Pools APIs policies with system scope enabled,
diff --git a/nova/tests/unit/policies/test_floating_ips.py b/nova/tests/unit/policies/test_floating_ips.py
index 55453e7708..26c721e9e9 100644
--- a/nova/tests/unit/policies/test_floating_ips.py
+++ b/nova/tests/unit/policies/test_floating_ips.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -48,12 +49,13 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that everyone is able to perform crud operation on FIP.
+ # With legacy rules and scope checks disabled, everyone is able to
+ # perform CRUD operations on FIPs.
# NOTE: Nova cannot verify the FIP owner during nova policy
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of FIP then neutron will be returning the appropriate error.
- self.reader_authorized_contexts = [
+ self.member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -62,48 +64,45 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.reader_unauthorized_contexts = []
- self.cd_authorized_contexts = self.reader_authorized_contexts
- self.cd_unauthorized_contexts = self.reader_unauthorized_contexts
- # Check that admin or owner is able to add/delete FIP to server.
- self.admin_or_owner_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to add/delete
- # FIP to server.
- self.admin_or_owner_unauthorized_contexts = [
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
+ self.other_project_member_context
]
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and other project roles (because the legacy rule
+ # allows the server owner - same project id, no role check) are able
+ # to add/delete a FIP to/from a server.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.network.neutron.API.get_floating_ip')
def test_show_floating_ip_policy(self, mock_get):
rule_name = "os_compute_api:os-floating-ips:show"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.neutron.API.get_floating_ips_by_project')
def test_index_floating_ip_policy(self, mock_get):
rule_name = "os_compute_api:os-floating-ips:list"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.network.neutron.API.get_floating_ip_by_address')
@mock.patch('nova.network.neutron.API.allocate_floating_ip')
def test_create_floating_ip_policy(self, mock_create, mock_get):
rule_name = "os_compute_api:os-floating-ips:create"
- self.common_policy_check(self.cd_authorized_contexts,
- self.cd_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.neutron.API.get_instance_id_by_floating_address')
@mock.patch('nova.network.neutron.API.get_floating_ip')
@@ -112,10 +111,9 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
def test_delete_floating_ip_policy(self, mock_delete, mock_get,
mock_instance):
rule_name = "os_compute_api:os-floating-ips:delete"
- self.common_policy_check(self.cd_authorized_contexts,
- self.cd_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, uuids.fake_id)
@mock.patch('nova.objects.Instance.get_network_info')
@mock.patch('nova.network.neutron.API.associate_floating_ip')
@@ -127,11 +125,10 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
mock_net.return_value = network_model.NetworkInfo.hydrate(ninfo)
body = {'addFloatingIp': {
'address': '1.2.3.4'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_controller._add_floating_ip,
- self.req, self.instance.uuid, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_controller._add_floating_ip,
+ self.req, self.instance.uuid, body=body)
@mock.patch('nova.network.neutron.API.get_instance_id_by_floating_address')
@mock.patch('nova.network.neutron.API.get_floating_ip_by_address')
@@ -142,11 +139,53 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
mock_instance.return_value = self.instance.uuid
body = {'removeFloatingIp': {
'address': '1.2.3.4'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_controller._remove_floating_ip,
- self.req, self.instance.uuid, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_controller._remove_floating_ip,
+ self.req, self.instance.uuid, body=body)
+
+
+class FloatingIPNoLegacyNoScopePolicyTest(FloatingIPPolicyTest):
+ """Test Floating IP APIs policies with system scope disabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ fip_policies.BASE_POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'add':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'remove':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(FloatingIPNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, only project admin or member will be
+ # able to add/remove a FIP to/from a server.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ # With no legacy rules, other project roles like foo will not be
+ # able to operate on FIPs.
+ self.member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
@@ -163,63 +202,58 @@ class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
def setUp(self):
super(FloatingIPScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system users.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
-class FloatingIPNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
+class FloatingIPScopeTypeNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
"""Test Floating IP APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(FloatingIPNoLegacyPolicyTest, self).setUp()
+ super(FloatingIPScopeTypeNoLegacyPolicyTest, self).setUp()
# Check that system admin or owner is able to
# add/delete FIP to server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system and non-admin/owner is not able
- # to add/delete FIP to server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ # With no legacy rules and scope checks enabled, system users and
+ # other project roles like foo will not be able to operate on FIPs.
+ self.member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
]
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.reader_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- self.cd_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.project_admin_context, self.project_member_context,
- self.legacy_admin_context, self.other_project_member_context
- ]
- self.cd_unauthorized_contexts = [
- self.system_reader_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context, self.other_project_reader_context
+ self.other_project_member_context
]
diff --git a/nova/tests/unit/policies/test_hosts.py b/nova/tests/unit/policies/test_hosts.py
index cdce7d2b1c..e07c907cf8 100644
--- a/nova/tests/unit/policies/test_hosts.py
+++ b/nova/tests/unit/policies/test_hosts.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import hosts
from nova.policies import base as base_policy
@@ -32,37 +32,19 @@ class HostsPolicyTest(base.BasePolicyTest):
self.controller = hosts.HostController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to perform operations on hosts.
- self.system_admin_authorized_contexts = [
- self.system_admin_context, self.legacy_admin_context,
+ # With legacy rules and scope checks disabled by default, system admin,
+ # legacy admin, and project admin will be able to perform host
+ # operations.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to perform operations
- # on hosts.
- self.system_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context, self.other_project_reader_context
- ]
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
- self.project_admin_context
- ]
- self.system_reader_unauthorized_contexts = [
- self.project_foo_context, self.system_foo_context,
- self.project_member_context, self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
@mock.patch('nova.compute.api.HostAPI.service_get_all')
def test_list_hosts_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.context.set_target_cell')
@mock.patch('nova.objects.HostMapping.get_by_host')
@@ -71,41 +53,48 @@ class HostsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.HostAPI.instance_get_all_by_host')
def test_show_host_policy(self, mock_get, mock_node, mock_map, mock_set):
rule_name = policies.POLICY_NAME % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, 11111)
def test_update_host_policy(self):
rule_name = policies.POLICY_NAME % 'update'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, 11111, body={})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, 11111, body={})
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_reboot_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'reboot'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.reboot,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.reboot,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_shutdown_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'shutdown'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.shutdown,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.shutdown,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_startup_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'start'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.startup,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.startup,
+ self.req, 11111)
+
+
+class HostsNoLegacyNoScopePolicyTest(HostsPolicyTest):
+ """Test Hosts APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to perform
+ host operations. Legacy admin is allowed because the policy is just
+ 'admin' when scope checks are disabled.
+
+ """
+
+ without_deprecated_rules = True
class HostsScopeTypePolicyTest(HostsPolicyTest):
@@ -122,72 +111,29 @@ class HostsScopeTypePolicyTest(HostsPolicyTest):
super(HostsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to perform operations on hosts.
- self.system_admin_authorized_contexts = [
- self.system_admin_context]
- # Check that system non-admin is not able to perform operations
- # on hosts.
- self.system_admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context, self.other_project_reader_context
- ]
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
-
-
-class HostsNoLegacyPolicyTest(HostsScopeTypePolicyTest):
- """Test Hosts APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ # With scope checks enabled, system users are no longer allowed;
+ # legacy admin and project admin are able to perform host
+ # operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class HostsScopeTypeNoLegacyPolicyTest(HostsScopeTypePolicyTest):
+ """Test Hosts APIs policies with with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults. So
+ only system admin is able to perform hosts Operations.
"""
+
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'reboot':
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'shutdown':
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'startup':
- base_policy.SYSTEM_ADMIN}
-
- def setUp(self):
- super(HostsNoLegacyPolicyTest, self).setUp()
-
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.system_admin_authorized_contexts = [
- self.system_admin_context
- ]
- self.system_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.project_admin_context, self.project_member_context,
- self.legacy_admin_context, self.other_project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context, self.other_project_reader_context
- ]
+ base_policy.ADMIN}
diff --git a/nova/tests/unit/policies/test_hypervisors.py b/nova/tests/unit/policies/test_hypervisors.py
index 2b9eefcfd9..dd17ebe2fe 100644
--- a/nova/tests/unit/policies/test_hypervisors.py
+++ b/nova/tests/unit/policies/test_hypervisors.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import hypervisors
from nova.policies import base as base_policy
@@ -36,76 +36,67 @@ class HypervisorsPolicyTest(base.BasePolicyTest):
self.controller.host_api.service_get_by_compute_host = mock.MagicMock()
self.controller.host_api.compute_node_get = mock.MagicMock()
- # Check that system scoped admin, member and reader are able to
- # perform operations on hypervisors.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to get hypervisors. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
+ # With legacy rule and scope check disabled by default, system admin,
+ # legacy admin, and project admin will be able to perform hypervisors
+ # Operations.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-system-reader are not able to perform operations
- # on hypervisors
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
def test_list_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_list_details_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list-detail'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.detail,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.detail,
+ self.req)
def test_show_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.get_host_uptime')
def test_uptime_hypervisors_policy(self, mock_uptime):
rule_name = hv_policies.BASE_POLICY_NAME % 'uptime'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.uptime,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.uptime,
+ self.req, 11111)
def test_search_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'search'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.search,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.search,
+ self.req, 11111)
def test_servers_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'servers'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.servers,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.servers,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.compute_node_statistics')
def test_statistics_hypervisors_policy(self, mock_statistics):
rule_name = hv_policies.BASE_POLICY_NAME % 'statistics'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.statistics,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.statistics,
+ self.req)
+
+
+class HypervisorsNoLegacyNoScopePolicyTest(HypervisorsPolicyTest):
+ """Test Hypervisors APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to perform
+ Hypervisors Operations. Legacy admin will be allowed as policy is just
+ admin if no scope checks.
+ """
+
+ without_deprecated_rules = True
class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
@@ -122,40 +113,33 @@ class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
super(HypervisorsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to perform operations
- # on hypervisors.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to perform operations
- # on hypervisors.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class HypervisorsNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
- """Test Hypervisors APIs policies with system scope enabled,
- and no more deprecated rules.
+ # With scope checks enabled, system users are no longer allowed;
+ # legacy admin and project admin are able to perform hypervisor
+ # operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class HypervisorsScopeTypeNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
+ """Test Hypervisors APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only system admin is able to perform hypervisors Operations.
"""
+
without_deprecated_rules = True
+
rules_without_deprecation = {
hv_policies.BASE_POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'list-detail':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'show':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'statistics':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'uptime':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'search':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'servers':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
}
diff --git a/nova/tests/unit/policies/test_instance_actions.py b/nova/tests/unit/policies/test_instance_actions.py
index b3e43b3498..1ca9a66c14 100644
--- a/nova/tests/unit/policies/test_instance_actions.py
+++ b/nova/tests/unit/policies/test_instance_actions.py
@@ -11,8 +11,9 @@
# under the License.
import copy
+from unittest import mock
+
import fixtures
-import mock
from nova.api.openstack import api_version_request
from oslo_policy import policy as oslo_policy
@@ -62,33 +63,17 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that system reader are able to show the instance
- # actions events.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
+ # With legacy rules and no scope checks, any role in the project
+ # can get server actions, and any admin is able to get server
+ # actions with event details.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-system-reader are not able to show the instance
- # actions events.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
-
- self.project_or_system_reader_authorized_contexts = [
+ # and project reader can get their server actions without event
+ # details.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.project_or_system_reader_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
def _set_policy_rules(self, overwrite=True):
rules = {ia_policies.BASE_POLICY_NAME % 'show': '@'}
@@ -97,9 +82,8 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
def test_index_instance_action_policy(self):
rule_name = ia_policies.BASE_POLICY_NAME % "list"
- self.common_policy_check(
- self.project_or_system_reader_authorized_contexts,
- self.project_or_system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name, self.controller.index,
self.req, self.instance['uuid'])
@@ -108,9 +92,8 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
mock_action_get.return_value = fake_action
rule_name = ia_policies.BASE_POLICY_NAME % "show"
- self.common_policy_check(
- self.project_or_system_reader_authorized_contexts,
- self.project_or_system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name, self.controller.show,
self.req, self.instance['uuid'], fake_action['request_id'])
@@ -131,9 +114,8 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
self._set_policy_rules(overwrite=False)
rule_name = ia_policies.BASE_POLICY_NAME % "events"
- authorize_res, unauthorize_res = self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, self.instance['uuid'],
fake_action['request_id'], fatal=False)
@@ -149,6 +131,28 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
self.assertNotIn('events', action['instanceAction'])
+class InstanceActionsNoLegacyNoScopePolicyTest(InstanceActionsPolicyTest):
+ """Test os-instance-actions APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ ia_policies.BASE_POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ia_policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ia_policies.BASE_POLICY_NAME % 'events':
+ base_policy.ADMIN,
+ }
+
+ def setUp(self):
+ super(InstanceActionsNoLegacyNoScopePolicyTest, self).setUp()
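+ # (project_reader_or_admin_with_no_scope_no_legacy is assumed to
+ # be a pre-computed context set on the policy test base class,
+ # listing the project reader/member/admin contexts allowed under
+ # the new defaults when legacy rules and scope checks are both
+ # disabled.)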
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+
class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
"""Test os-instance-actions APIs Deprecated policies.
@@ -185,7 +189,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
@mock.patch('nova.api.openstack.common.get_instance')
def test_deprecated_policy_overridden_rule_is_checked(
self, mock_instance_get, mock_actions_get):
- # Test to verify if deprecatd overridden policy is working.
+ # Test to verify if deprecated overridden policy is working.
instance = fake_instance.fake_instance_obj(
self.admin_or_owner_req.environ['nova.context'])
@@ -193,7 +197,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
# Check for success as admin_or_owner role. Deprecated rule
# has been overridden with admin checks in policy.yaml
# If admin role pass it means overridden rule is enforced by
- # olso.policy because new default is system reader and the old
+ # oslo.policy because new default is system reader and the old
# default is admin.
self.controller.index(self.admin_or_owner_req, instance['uuid'])
@@ -221,6 +225,11 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
def setUp(self):
super(InstanceActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
@mock.patch('nova.objects.InstanceActionEventList.get_by_action')
@mock.patch('nova.objects.InstanceAction.get_by_request_id')
@@ -241,9 +250,8 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self._set_policy_rules(overwrite=False)
rule_name = ia_policies.BASE_POLICY_NAME % "events:details"
- authorize_res, unauthorize_res = self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, self.instance['uuid'],
fake_action['request_id'], fatal=False)
@@ -267,54 +275,25 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self.assertNotIn('details', event)
-class InstanceActionsNoLegacyPolicyTest(InstanceActionsPolicyTest):
+class InstanceActionsScopeTypeNoLegacyPolicyTest(
+ InstanceActionsScopeTypePolicyTest):
"""Test os-instance-actions APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
}
def setUp(self):
- super(InstanceActionsNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system reader are able to get the
- # instance action events.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_reader_context,
- self.system_member_context]
- # Check that non-system-reader are not able to
- # get the instance action events
- self.system_reader_unauthorized_contexts = [
- self.project_admin_context,
- self.system_foo_context, self.legacy_admin_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
-
- # Check that system or projct reader is able to
- # show the instance actions events.
- self.project_or_system_reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system or non-project reader is not able to
- # show the instance actions events.
- self.project_or_system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(InstanceActionsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope enabled, only project admin,
+ # member, and reader will be able to get server actions, and
+ # only admin can get them with event details.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_instance_usage_audit_log.py b/nova/tests/unit/policies/test_instance_usage_audit_log.py
index e320beacd2..71b0cdd2aa 100644
--- a/nova/tests/unit/policies/test_instance_usage_audit_log.py
+++ b/nova/tests/unit/policies/test_instance_usage_audit_log.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import instance_usage_audit_log as iual
from nova.policies import base as base_policy
@@ -35,37 +35,37 @@ class InstanceUsageAuditLogPolicyTest(base.BasePolicyTest):
self.controller.host_api.task_log_get_all = mock.MagicMock()
self.controller.host_api.service_get_all = mock.MagicMock()
- # Check that admin is able to get instance usage audit log.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to get instance usage audit log. This make sure
- # that existing tokens will keep working even we have changed
- # this policy defaults to reader role.
- self.reader_authorized_contexts = [
+ # With legacy rule, all admin_api will be able to get instance usage
+ # audit log.
+ self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get instance usage audit log.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ self.project_admin_context]
def test_show_policy(self):
rule_name = iual_policies.BASE_POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, '2020-03-25 14:40:00')
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, '2020-03-25 14:40:00')
def test_index_policy(self):
rule_name = iual_policies.BASE_POLICY_NAME % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
+
+
+class InstanceUsageNoLegacyNoScopeTest(InstanceUsageAuditLogPolicyTest):
+ """Test Instance Usage API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ iual_policies.BASE_POLICY_NAME % 'list':
+ base_policy.ADMIN,
+ iual_policies.BASE_POLICY_NAME % 'show':
+ base_policy.ADMIN,
+ }
class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
@@ -83,29 +83,21 @@ class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
super(InstanceUsageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get instance usage audit log.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-admin is not able to get instance
- # usage audit log.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class InstanceUsageNoLegacyPolicyTest(InstanceUsageScopeTypePolicyTest):
+ # Scope checks remove system users' power.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class InstanceUsageScopeTypeNoLegacyPolicyTest(
+ InstanceUsageScopeTypePolicyTest):
"""Test Instance Usage Audit Log APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
iual_policies.BASE_POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
iual_policies.BASE_POLICY_NAME % 'show':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
}
diff --git a/nova/tests/unit/policies/test_keypairs.py b/nova/tests/unit/policies/test_keypairs.py
index 4faefea2ef..ee39133b7a 100644
--- a/nova/tests/unit/policies/test_keypairs.py
+++ b/nova/tests/unit/policies/test_keypairs.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from nova.policies import keypairs as policies
from nova.api.openstack.compute import keypairs
@@ -34,7 +35,7 @@ class KeypairsPolicyTest(base.BasePolicyTest):
# Check that everyone is able to create, delete and get
# their keypairs.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
@@ -42,88 +43,58 @@ class KeypairsPolicyTest(base.BasePolicyTest):
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = []
+ ])
# Check that admin is able to create, delete and get
# other users keypairs.
- self.admin_authorized_contexts = [
+ self.admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to create, delete and get
- # other users keypairs.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that system reader is able to get
- # other users keypairs.
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get
- # other users keypairs.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context])
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_index_keypairs_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_index_others_keypairs_policy(self, mock_get):
req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- req)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ req)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pair')
def test_show_keypairs_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, fakes.FAKE_UUID)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pair')
def test_show_others_keypairs_policy(self, mock_get):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ req, fakes.FAKE_UUID)
@mock.patch('nova.compute.api.KeypairAPI.create_key_pair')
def test_create_keypairs_policy(self, mock_create):
rule_name = policies.POLICY_ROOT % 'create'
mock_create.return_value = (test_keypair.fake_keypair, 'FAKE_KEY')
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req,
- body={'keypair': {'name': 'create_test'}})
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req,
+ body={'keypair': {'name': 'create_test'}})
@mock.patch('nova.compute.api.KeypairAPI.create_key_pair')
def test_create_others_keypairs_policy(self, mock_create):
@@ -132,31 +103,39 @@ class KeypairsPolicyTest(base.BasePolicyTest):
rule_name = policies.POLICY_ROOT % 'create'
mock_create.return_value = (test_keypair.fake_keypair, 'FAKE_KEY')
body = {'keypair': {'name': 'test2', 'user_id': 'user2'}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.create,
- req, body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ req, body=body)
@mock.patch('nova.compute.api.KeypairAPI.delete_key_pair')
def test_delete_keypairs_policy(self, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, fakes.FAKE_UUID)
@mock.patch('nova.compute.api.KeypairAPI.delete_key_pair')
def test_delete_others_keypairs_policy(self, mock_delete):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ req, fakes.FAKE_UUID)
+
+
+class KeypairsNoLegacyNoScopeTest(KeypairsPolicyTest):
+ """Test Keypairs API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(KeypairsNoLegacyNoScopeTest, self).setUp()
class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
@@ -173,6 +152,12 @@ class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
super(KeypairsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checking, only project-scoped users are allowed
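+ # (reduce_set is assumed, per the policy test base class, to
+ # intersect the named '<name>_contexts' set with the given set,
+ # so everyone_authorized_contexts keeps only the project-scoped
+ # contexts here.)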
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+
class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
"""Test Keypairs APIs policies with system scope enabled,
@@ -180,35 +165,3 @@ class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
access system APIs.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(KeypairsNoLegacyPolicyTest, self).setUp()
-
- # Check that system admin is able to create, delete and get
- # other users keypairs.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that system non-admin is not able to create, delete and get
- # other users keypairs.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
- # Check that system reader is able to get
- # other users keypairs.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get
- # other users keypairs.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
diff --git a/nova/tests/unit/policies/test_limits.py b/nova/tests/unit/policies/test_limits.py
index cab2b5f679..aba647caec 100644
--- a/nova/tests/unit/policies/test_limits.py
+++ b/nova/tests/unit/policies/test_limits.py
@@ -10,15 +10,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+import functools
+from unittest import mock
from nova.api.openstack.compute import limits
+import nova.conf
from nova.policies import base as base_policy
from nova.policies import limits as limits_policies
from nova import quota
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class LimitsPolicyTest(base.BasePolicyTest):
"""Test Limits APIs policies with all possible context.
@@ -55,48 +59,52 @@ class LimitsPolicyTest(base.BasePolicyTest):
mock_get_project_quotas.start()
# Check that everyone is able to get their limits
- self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_member_context, self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = []
-
- # Check that system reader is able to get other projects limit.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to get limit. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
+ self.everyone_authorized_contexts = self.all_contexts
+
+ # With legacy rule, any admin is able to get other projects limit.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get other projects limit.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ self.project_admin_context]
def test_get_limits_policy(self):
rule_name = limits_policies.BASE_POLICY_NAME
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_get_other_limits_policy(self):
+ rule = limits_policies.BASE_POLICY_NAME
+ self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('/?tenant_id=faketenant')
rule_name = limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- req)
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(base.rule_if_system,
+ rule, rule_name)
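+ # (rule_if_system is assumed, per the policy test base, to pick
+ # the first rule for system-scoped contexts and the second for
+ # project-scoped ones, so each context gets checked against the
+ # rule that actually applies to it when scope is enforced.)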
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ check_rule, self.controller.index,
+ req)
+
+
+class LimitsNoLegacyNoScopeTest(LimitsPolicyTest):
+ """Test Flavor Access API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
+ base_policy.ADMIN}
+
+ def setUp(self):
+ super(LimitsNoLegacyNoScopeTest, self).setUp()
+
+ # Even with no legacy rule, any admin can get other project
+ # limits.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
class LimitsScopeTypePolicyTest(LimitsPolicyTest):
@@ -114,22 +122,18 @@ class LimitsScopeTypePolicyTest(LimitsPolicyTest):
super(LimitsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get other projects limit.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able toget other
- # projects limit.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
+ # With scope enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
+ self.project_foo_context, self.other_project_reader_context
]
-class LimitsNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
+class LimitsScopeTypeNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
"""Test Limits APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -137,4 +141,17 @@ class LimitsNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.SYSTEM_READER}
+ base_policy.ADMIN}
+
+ def setUp(self):
+ super(LimitsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope enabled, only project-level
+ # admin will get other projects' limits.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.other_project_reader_context
+ ]
diff --git a/nova/tests/unit/policies/test_lock_server.py b/nova/tests/unit/policies/test_lock_server.py
index 883c71929e..31de5cff0c 100644
--- a/nova/tests/unit/policies/test_lock_server.py
+++ b/nova/tests/unit/policies/test_lock_server.py
@@ -10,13 +10,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import lock_server
from nova.compute import vm_states
+import nova.conf
from nova import exception
from nova.policies import base as base_policy
from nova.policies import lock_server as ls_policies
@@ -24,6 +27,8 @@ from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class LockServerPolicyTest(base.BasePolicyTest):
"""Test Lock server APIs policies with all possible context.
@@ -48,54 +53,39 @@ class LockServerPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to lock/unlock
- # the server
- self.admin_or_owner_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project
+ # members, project readers, and other project roles (because the
+ # legacy rule allows any server owner, i.e. the same project id
+ # with no role check) are able to lock and unlock the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to lock/unlock
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that admin is able to unlock the server which is
- # locked by other
- self.admin_authorized_contexts = [
+
+ # By default, legacy rules are enabled and scope checks are
+ # disabled, so system admin, legacy admin, and project admin are
+ # able to override unlock, regardless of who locked the server.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to unlock the server
- # which is locked by other
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.lock')
def test_lock_server_policy(self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._lock,
- self.req, self.instance.uuid,
- body={'lock': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._lock,
+ self.req, self.instance.uuid,
+ body={'lock': {}})
@mock.patch('nova.compute.api.API.unlock')
def test_unlock_server_policy(self, mock_unlock):
rule_name = ls_policies.POLICY_ROOT % 'unlock'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unlock,
- self.req, self.instance.uuid,
- body={'unlock': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unlock,
+ self.req, self.instance.uuid,
+ body={'unlock': {}})
@mock.patch('nova.compute.api.API.unlock')
@mock.patch('nova.compute.api.API.is_expected_locked_by')
@@ -104,12 +94,16 @@ class LockServerPolicyTest(base.BasePolicyTest):
rule = ls_policies.POLICY_ROOT % 'unlock'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._unlock,
- self.req, self.instance.uuid,
- body={'unlock': {}})
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(base.rule_if_system,
+ rule, rule_name)
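+ # (With scope enforced, the effective rule depends on each
+ # context's scope, so a callable built from base.rule_if_system,
+ # assumed to select between the two rules per context, is passed
+ # instead of a fixed rule name.)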
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ check_rule,
+ self.controller._unlock,
+ self.req, self.instance.uuid,
+ body={'unlock': {}})
def test_lock_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -125,7 +119,7 @@ class LockServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.lock')
- def test_lock_sevrer_overridden_policy_pass_with_same_user(
+ def test_lock_server_overridden_policy_pass_with_same_user(
self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -134,6 +128,22 @@ class LockServerPolicyTest(base.BasePolicyTest):
body={'lock': {}})
+class LockServerNoLegacyNoScopePolicyTest(LockServerPolicyTest):
+ """Test lock/unlock server APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(LockServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
"""Test Lock Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -147,49 +157,28 @@ class LockServerScopeTypePolicyTest(LockServerPolicyTest):
def setUp(self):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed to lock/unlock
+ # the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class LockServerNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
+class LockServerScopeTypeNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
"""Test Lock Server APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(LockServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to lock/unlock
- # the server
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to lock/unlock
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
-
- # Check that system admin is able to unlock the server which is
- # locked by other
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that system non-admin is not able to unlock the server
- # which is locked by other
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ super(LockServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope enabled and no legacy rules, only project admin/member
+ # will be able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
-class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
+class LockServerOverridePolicyTest(LockServerScopeTypeNoLegacyPolicyTest):
"""Test Lock Server APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -198,21 +187,11 @@ class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
def setUp(self):
super(LockServerOverridePolicyTest, self).setUp()
-
- # Check that system admin or project scoped role as override above
- # is able to unlock the server which is locked by other
- self.admin_authorized_contexts = [
- self.system_admin_context,
+ # We are overriding the 'unlock:unlock_override' policy
+ # to PROJECT_MEMBER, so we test it with both admin and
+ # project member as allowed contexts.
+ self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # unlock the server which is locked by other
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
def test_unlock_override_server_policy(self):
rule = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
@@ -220,6 +199,6 @@ class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
# make unlock allowed for everyone so that we can check unlock
# override policy.
ls_policies.POLICY_ROOT % 'unlock': "@",
- rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}, overwrite=False)
+ rule: base_policy.PROJECT_MEMBER}, overwrite=False)
super(LockServerOverridePolicyTest,
self).test_unlock_override_server_policy()
diff --git a/nova/tests/unit/policies/test_migrate_server.py b/nova/tests/unit/policies/test_migrate_server.py
index 0082b3d414..0f750770d9 100644
--- a/nova/tests/unit/policies/test_migrate_server.py
+++ b/nova/tests/unit/policies/test_migrate_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,28 +48,19 @@ class MigrateServerPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to migrate the server.
- self.admin_authorized_contexts = [
+ # With legacy rule, any admin is able to migrate
+ # the server.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context
- ]
- # Check that non-admin is not able to migrate the server
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
@mock.patch('nova.compute.api.API.resize')
def test_migrate_server_policy(self, mock_resize):
rule_name = ms_policies.POLICY_ROOT % 'migrate'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._migrate,
- self.req, self.instance.uuid,
- body={'migrate': None})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._migrate,
+ self.req, self.instance.uuid,
+ body={'migrate': None})
@mock.patch('nova.compute.api.API.live_migrate')
def test_migrate_live_server_policy(self, mock_live_migrate):
@@ -78,11 +70,18 @@ class MigrateServerPolicyTest(base.BasePolicyTest):
'block_migration': "False",
'disk_over_commit': "False"}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._migrate_live,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._migrate_live,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class MigrateServerNoLegacyNoScopeTest(MigrateServerPolicyTest):
+ """Test Server Migrations API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
@@ -99,32 +98,21 @@ class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
def setUp(self):
super(MigrateServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class MigrateServerNoLegacyPolicyTest(MigrateServerScopeTypePolicyTest):
+class MigrateServerScopeTypeNoLegacyPolicyTest(
+ MigrateServerScopeTypePolicyTest):
"""Test Migrate Server APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin is able to migrate the server.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non system admin is not able to migrate the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
-
-class MigrateServerOverridePolicyTest(MigrateServerNoLegacyPolicyTest):
+
+class MigrateServerOverridePolicyTest(
+ MigrateServerScopeTypeNoLegacyPolicyTest):
"""Test Migrate Server APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -136,23 +124,13 @@ class MigrateServerOverridePolicyTest(MigrateServerNoLegacyPolicyTest):
rule_migrate = ms_policies.POLICY_ROOT % 'migrate'
rule_live_migrate = ms_policies.POLICY_ROOT % 'migrate_live'
# NOTE(gmann): override the rule to project member and verify it
- # work as policy is system and projct scoped.
+ # work as policy is system and project scoped.
self.policy.set_rules({
- rule_migrate: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
- rule_live_migrate: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+ rule_migrate: base_policy.PROJECT_MEMBER,
+ rule_live_migrate: base_policy.PROJECT_MEMBER},
overwrite=False)
- # Check that system admin or project scoped role as override above
+ # Check that project member role as override above
# is able to migrate the server
- self.admin_authorized_contexts = [
- self.system_admin_context,
+ self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # migrate the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
diff --git a/nova/tests/unit/policies/test_migrations.py b/nova/tests/unit/policies/test_migrations.py
index a124fa508b..25cd75a125 100644
--- a/nova/tests/unit/policies/test_migrations.py
+++ b/nova/tests/unit/policies/test_migrations.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import migrations
from nova.policies import migrations as migrations_policies
@@ -32,27 +32,25 @@ class MigrationsPolicyTest(base.BasePolicyTest):
self.controller = migrations.MigrationsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to list migrations.
- self.reader_authorized_contexts = [
+ # With legacy rule, any admin is able to list migrations.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- # Check that non-admin is not able to list migrations.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
@mock.patch('nova.compute.api.API.get_migrations')
def test_list_migrations_policy(self, mock_migration):
rule_name = migrations_policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
+
+
+class MigrationsNoLegacyNoScopeTest(MigrationsPolicyTest):
+ """Test Migrations API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class MigrationsScopeTypePolicyTest(MigrationsPolicyTest):
@@ -70,15 +68,14 @@ class MigrationsScopeTypePolicyTest(MigrationsPolicyTest):
super(MigrationsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to list migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non system reader is not able to list migrations.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+
+class MigrationsScopeTypeNoLegacyPolicyTest(
+ MigrationsScopeTypePolicyTest):
+ """Test Migrations APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
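The recurring mechanical change across these test files is the switch from common_policy_check, which took parallel authorized and unauthorized context lists, to common_policy_auth, which takes only the authorized set. A minimal sketch of how such a helper can derive the unauthorized set, assuming the base class exposes an all_contexts collection; this illustrates the pattern and is not the actual nova BasePolicyTest code:

    def common_policy_auth(self, authorized_contexts, rule_name,
                           func, req, *args, **kwargs):
        # Hypothetical: everything not explicitly authorized must be
        # rejected, so derive the unauthorized set by complement.
        unauthorized = set(self.all_contexts) - set(authorized_contexts)
        return self.common_policy_check(
            authorized_contexts, list(unauthorized),
            rule_name, func, req, *args, **kwargs)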
diff --git a/nova/tests/unit/policies/test_multinic.py b/nova/tests/unit/policies/test_multinic.py
index 21c14bfc57..852ff25965 100644
--- a/nova/tests/unit/policies/test_multinic.py
+++ b/nova/tests/unit/policies/test_multinic.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -45,40 +46,53 @@ class MultinicPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to add/remove fixed ip.
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers, and any other role in the project (the
+ # legacy rule allows the server owner: same project_id, no role
+ # check) are able to add/remove a fixed IP.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to add/remove
- # fixed ip.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.add_fixed_ip')
def test_add_fixed_ip_policy(self, mock_add):
rule_name = "os_compute_api:os-multinic:add"
body = dict(addFixedIp=dict(networkId='test_net'))
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller._add_fixed_ip,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._add_fixed_ip,
+ self.req, self.instance.uuid,
+ body=body)
@mock.patch('nova.compute.api.API.remove_fixed_ip')
def test_remove_fixed_ip_policy(self, mock_remove):
rule_name = "os_compute_api:os-multinic:remove"
body = dict(removeFixedIp=dict(address='1.2.3.4'))
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller._remove_fixed_ip,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._remove_fixed_ip,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class MultinicNoLegacyNoScopePolicyTest(MultinicPolicyTest):
+ """Test Multinic APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.BASE_POLICY_NAME % 'add':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.BASE_POLICY_NAME % 'remove':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(MultinicNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to add/remove the fixed ip.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class MultinicScopeTypePolicyTest(MultinicPolicyTest):
@@ -95,33 +109,26 @@ class MultinicScopeTypePolicyTest(MultinicPolicyTest):
def setUp(self):
super(MultinicScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to
+ # add/remove the fixed IP.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class MultinicNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
+class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
"""Test Multinic APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(MultinicNoLegacyPolicyTest, self).setUp()
- # Check that system admin or owner is able to
- # add/delete Fixed IP to server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system and non-admin/owner is not able
- # to add/delete Fixed IP to server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(MultinicScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only the project
+ # admin/member will be able to add/remove the fixed IP.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
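Starting with the multinic tests, the per-class context lists collapse into shared attributes such as project_member_or_admin_with_no_scope_no_legacy and project_m_r_or_admin_with_scope_and_legacy. The definitions below are an assumption for illustration, with membership inferred from which contexts the hunks above treat as authorized; they are not the real base-class code:

    # Assumed base-class definitions, inferred from usage. Without
    # scope checks a system admin token still passes a plain "admin"
    # check; with scope checks it does not.
    self.project_member_or_admin_with_no_scope_no_legacy = [
        self.legacy_admin_context, self.system_admin_context,
        self.project_admin_context, self.project_member_context,
    ]
    self.project_m_r_or_admin_with_scope_and_legacy = [
        self.legacy_admin_context, self.project_admin_context,
        self.project_member_context, self.project_reader_context,
        self.project_foo_context,
    ]
    self.project_member_or_admin_with_scope_no_legacy = [
        self.legacy_admin_context, self.project_admin_context,
        self.project_member_context,
    ]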
diff --git a/nova/tests/unit/policies/test_networks.py b/nova/tests/unit/policies/test_networks.py
index 9ca018835c..9c3e0b735a 100644
--- a/nova/tests/unit/policies/test_networks.py
+++ b/nova/tests/unit/policies/test_networks.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import networks
@@ -38,7 +39,7 @@ class NetworksPolicyTest(base.BasePolicyTest):
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of networks then neutron will be returning the appropriate error.
- self.reader_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -47,23 +48,47 @@ class NetworksPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.reader_unauthorized_contexts = []
@mock.patch('nova.network.neutron.API.get_all')
def test_list_networks_policy(self, mock_get):
rule_name = "os_compute_api:os-networks:list"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.network.neutron.API.get')
def test_show_network_policy(self, mock_get):
rule_name = "os_compute_api:os-networks:show"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
+
+
+class NetworksNoLegacyNoScopePolicyTest(NetworksPolicyTest):
+ """Test Networks APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_ROOT % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_ROOT % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN}
+
+ def setUp(self):
+ super(NetworksNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, project roles other than reader/member (e.g.
+ # foo) will not be able to get networks.
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class NetworksScopeTypePolicyTest(NetworksPolicyTest):
@@ -80,30 +105,30 @@ class NetworksScopeTypePolicyTest(NetworksPolicyTest):
def setUp(self):
super(NetworksScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
-class NetworksNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
+class NetworksScopeTypeNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
"""Test Networks APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
- super(NetworksNoLegacyPolicyTest, self).setUp()
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
+ super(NetworksScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
self.other_project_member_context,
self.other_project_reader_context,
]
- self.reader_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
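Several of the new *NoLegacy* classes pair two class attributes: the without_deprecated_rules flag and a rules_without_deprecation mapping of policy name to its new default check string. A plausible sketch of how a policy-test base class could honor them, again an assumption rather than the real fixture:

    # Hypothetical setUp() fragment: when legacy (deprecated) rules
    # are disabled, register the new defaults directly so only the
    # new RBAC checks apply during enforcement.
    if self.without_deprecated_rules:
        self.policy.set_rules(self.rules_without_deprecation,
                              overwrite=False)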
diff --git a/nova/tests/unit/policies/test_pause_server.py b/nova/tests/unit/policies/test_pause_server.py
index 73e78bd55d..86a3e616dd 100644
--- a/nova/tests/unit/policies/test_pause_server.py
+++ b/nova/tests/unit/policies/test_pause_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -46,41 +47,32 @@ class PauseServerPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to pause/unpause
- # the server
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers, and any other role in the project (the
+ # legacy rule allows the server owner: same project_id, no role
+ # check) are able to pause/unpause the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to pause/unpause
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.pause')
def test_pause_server_policy(self, mock_pause):
rule_name = ps_policies.POLICY_ROOT % 'pause'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._pause,
- self.req, self.instance.uuid,
- body={'pause': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._pause,
+ self.req, self.instance.uuid,
+ body={'pause': {}})
@mock.patch('nova.compute.api.API.unpause')
def test_unpause_server_policy(self, mock_unpause):
rule_name = ps_policies.POLICY_ROOT % 'unpause'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unpause,
- self.req, self.instance.uuid,
- body={'unpause': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unpause,
+ self.req, self.instance.uuid,
+ body={'unpause': {}})
def test_pause_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -105,6 +97,22 @@ class PauseServerPolicyTest(base.BasePolicyTest):
body={'pause': {}})
+class PauseServerNoLegacyNoScopePolicyTest(PauseServerPolicyTest):
+ """Test Pause/unpause server APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(PauseServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to pause/unpause the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
"""Test Pause Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -118,28 +126,20 @@ class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
def setUp(self):
super(PauseServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to
+ # pause/unpause the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class PauseServerNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
+class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
"""Test Pause Server APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(PauseServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or server owner is able to pause/unpause
- # the server
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to pause/unpause
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(PauseServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only the project
+ # admin/member will be able to pause/unpause the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_quota_class_sets.py b/nova/tests/unit/policies/test_quota_class_sets.py
index 276c22fac4..09b90d5ebc 100644
--- a/nova/tests/unit/policies/test_quota_class_sets.py
+++ b/nova/tests/unit/policies/test_quota_class_sets.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import quota_classes
from nova.policies import quota_class_sets as policies
@@ -31,30 +31,12 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
self.controller = quota_classes.QuotaClassSetsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to update quota class
- self.admin_authorized_contexts = [
+ # With the legacy rule and scope checks disabled by default, the
+ # system admin, legacy admin, and project admin will be able to get
+ # and update quota classes.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to update quota class
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to get quota class
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get quota class
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.objects.Quotas.update_class')
def test_update_quota_class_sets_policy(self, mock_update):
@@ -64,21 +46,30 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
'ram': 51200, 'floating_ips': -1,
'fixed_ips': -1, 'instances': 10,
'injected_files': 5, 'cores': 20}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, 'test_class',
- body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, 'test_class',
+ body=body)
@mock.patch('nova.quota.QUOTAS.get_class_quotas')
def test_show_quota_class_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, 'test_class')
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, 'test_class')
+
+
+class QuotaClassSetsNoLegacyNoScopePolicyTest(QuotaClassSetsPolicyTest):
+ """Test QuotaClassSets APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to get
+ update quota class. Legacy admin will be allowed as policy
+ is just admin if no scope checks.
+
+ """
+ without_deprecated_rules = True
class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
@@ -94,38 +85,17 @@ class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
def setUp(self):
super(QuotaClassSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to update and get quota class
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system/admin is not able to update and get quota class
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to get quota class
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get quota class
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
-
-class QuotaClassSetsNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
- """Test Quota Class Sets APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+
+ # With scope checks enabled, only project admins are able to
+ # update and get quota classes.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class QuotaClassScopeTypeNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
+ """Test QuotaClassSets APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only system admin is able to update and get quota class.
+
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(QuotaClassSetsNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_quota_sets.py b/nova/tests/unit/policies/test_quota_sets.py
index 0b8d15c384..3ff8cd1c02 100644
--- a/nova/tests/unit/policies/test_quota_sets.py
+++ b/nova/tests/unit/policies/test_quota_sets.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import quota_sets
+from nova import exception
from nova.policies import quota_sets as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -33,55 +34,29 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
self.project_id = self.req.environ['nova.context'].project_id
- # Check that admin is able to update or revert quota
- # to default.
- self.admin_authorized_contexts = [
+ # With the legacy rule, any admin is able to update or revert their
+ # quota to defaults, or to get another project's quota.
+ self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to update or revert
- # quota to default.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system reader is able to get another project's quota.
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get another
- # project's quota.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that everyone is able to get the default quota or
- # their own quota.
- self.everyone_authorized_contexts = [
+ self.project_admin_context])
+ # With legacy rule, everyone is able to get their own quota.
+ self.project_reader_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.everyone_unauthorized_contexts = []
- # Check that system reader or owner is able to get their own quota.
- self.system_reader_or_owner_authorized_contexts = [
+ self.other_project_reader_context])
+ # Everyone is able to get the default quota
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context
- ]
+ self.other_project_reader_context])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
@mock.patch('nova.quota.QUOTAS.get_settable_quotas')
@@ -91,41 +66,57 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
'instances': 50,
'cores': 50}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.project_id,
- body=body)
+ for cxtx in self.project_admin_authorized_contexts:
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ self.controller.update(req, cxtx.project_id, body=body)
+ for cxtx in (self.all_contexts -
+ set(self.project_admin_authorized_contexts)):
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.update,
+ req, cxtx.project_id, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
@mock.patch('nova.objects.Quotas.destroy_all_by_project')
def test_delete_quota_sets_policy(self, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.project_id)
+ for cxtx in self.project_admin_authorized_contexts:
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ self.controller.delete(req, cxtx.project_id)
+ for cxtx in (self.all_contexts -
+ set(self.project_admin_authorized_contexts)):
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.delete,
+ req, cxtx.project_id)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
@mock.patch('nova.quota.QUOTAS.get_defaults')
def test_default_quota_sets_policy(self, mock_default):
rule_name = policies.POLICY_ROOT % 'defaults'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.defaults,
- self.req, self.project_id)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.defaults,
+ self.req, self.project_id)
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_detail_quota_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'detail'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.detail,
- self.req, 'try-other-project')
- # Check if everyone (owner) is able to get their own quota
- for cxtx in self.system_reader_or_owner_authorized_contexts:
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.detail,
+ self.req, 'try-other-project')
+ # Check if project reader or higher roles are able to get
+ # their own quota
+ for cxtx in self.project_reader_authorized_contexts:
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'] = cxtx
self.controller.detail(req, cxtx.project_id)
@@ -133,18 +124,44 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_show_quota_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, 'try-other-project')
- # Check if everyone (owner) is able to get their own quota
- for cxtx in self.system_reader_or_owner_authorized_contexts:
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, 'try-other-project')
+ # Check if project reader or higher roles are able to get
+ # their own quota
+ for cxtx in self.project_reader_authorized_contexts:
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'] = cxtx
self.controller.show(req, cxtx.project_id)
+class QuotaSetsNoLegacyNoScopePolicyTest(QuotaSetsPolicyTest):
+ """Test QuotaSets APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(QuotaSetsNoLegacyNoScopePolicyTest, self).setUp()
+ # Even with no legacy rule, any admin requesting an update/revert
+ # of quota for their own project will be allowed, and any admin
+ # will be able to get another project's quota.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # With no legacy rule, the other-project and foo roles will not
+ # be able to get the quota.
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_member_context,
+ self.project_reader_context]
+
+
class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
"""Test Quota Sets APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -159,23 +176,16 @@ class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
super(QuotaSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to update or revert quota
- # to default.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system admin is not able to update or revert
- # quota to default.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.project_admin_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # With scope enabled, system users will be disallowed.
+ self.reduce_set('project_admin_authorized', set([
+ self.legacy_admin_context,
+ self.project_admin_context]))
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts)
+ self.everyone_authorized_contexts = self.all_project_contexts
-class QuotaSetsNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
+class QuotaSetsScopeTypeNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
"""Test Quota Sets APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -183,28 +193,9 @@ class QuotaSetsNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(QuotaSetsNoLegacyPolicyTest, self).setUp()
-
- # Check that system reader is able to get another project's quota.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get anotherproject's
- # quota.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that everyone is able to get their own quota.
- self.system_reader_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.project_member_context,
- self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(QuotaSetsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope enabled and no legacy, system and
+ # non-reader/member users are disallowed.
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts -
+ set([self.project_foo_context]))
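test_quota_sets.py is the one file here that moves to set-based bookkeeping (note the set([...]) literals and all_project_contexts) and trims sets with a reduce_set helper. Its semantics are inferred from the call sites, intersecting the named context set with an allowed subset; a sketch under that assumption:

    def reduce_set(self, name, allowed):
        # Hypothetical: shrink self.<name>_contexts to the members
        # that remain allowed once stricter checks are enabled.
        current = getattr(self, name + '_contexts')
        setattr(self, name + '_contexts', set(current) & set(allowed))

The update/delete tests above also stop using the shared helper and loop over contexts by hand, because the policy target has to be the requesting context's own project_id rather than a fixed one.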
diff --git a/nova/tests/unit/policies/test_remote_consoles.py b/nova/tests/unit/policies/test_remote_consoles.py
index 825f78a938..a441d1c550 100644
--- a/nova/tests/unit/policies/test_remote_consoles.py
+++ b/nova/tests/unit/policies/test_remote_consoles.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova.policies import remote_consoles as rc_policies
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -48,31 +49,38 @@ class RemoteConsolesPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to get server
- # remote consoles.
- self.admin_or_owner_authorized_contexts = [
+ # With legacy rule and no scope checks, all admin, project members
+ # project reader or other project role(because legacy rule allow server
+ # owner- having same project id and no role check) is able to get
+ # server remote consoles.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get server
- # remote consoles.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
def test_create_console_policy(self):
rule_name = rc_policies.BASE_POLICY_NAME
body = {'remote_console': {'protocol': 'vnc', 'type': 'novnc'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class RemoteConsolesNoLegacyNoScopePolicyTest(RemoteConsolesPolicyTest):
+ """Test Remote Consoles APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(RemoteConsolesNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to get server remote consoles.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
@@ -88,9 +96,14 @@ class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
def setUp(self):
super(RemoteConsolesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to get
+ # server remote consoles.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class RemoteConsolesNoLegacyPolicyTest(RemoteConsolesScopeTypePolicyTest):
+class RemoteConsolesScopeTypeNoLegacyPolicyTest(
+ RemoteConsolesScopeTypePolicyTest):
"""Test Remote Consoles APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -98,18 +111,8 @@ class RemoteConsolesNoLegacyPolicyTest(RemoteConsolesScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(RemoteConsolesNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to get server
- # remote consoles.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to get server
- # remote consoles.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(RemoteConsolesScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only the project
+ # admin/member will be able to get server remote consoles.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_rescue.py b/nova/tests/unit/policies/test_rescue.py
index f970425b40..120809877c 100644
--- a/nova/tests/unit/policies/test_rescue.py
+++ b/nova/tests/unit/policies/test_rescue.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova.policies import base as base_policy
from nova.policies import rescue as rs_policies
from oslo_utils.fixture import uuidsentinel as uuids
@@ -48,40 +49,32 @@ class RescueServerPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to rescue/unrescue
- # the sevrer
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers, and any other role in the project (the
+ # legacy rule allows the server owner: same project_id, no role
+ # check) are able to rescue/unrescue the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to rescue/unrescue
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.rescue')
def test_rescue_server_policy(self, mock_rescue):
rule_name = rs_policies.BASE_POLICY_NAME
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._rescue,
- self.req, self.instance.uuid,
- body={'rescue': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._rescue,
+ self.req, self.instance.uuid,
+ body={'rescue': {}})
@mock.patch('nova.compute.api.API.unrescue')
def test_unrescue_server_policy(self, mock_unrescue):
rule_name = rs_policies.UNRESCUE_POLICY_NAME
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unrescue,
- self.req, self.instance.uuid,
- body={'unrescue': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unrescue,
+ self.req, self.instance.uuid,
+ body={'unrescue': {}})
def test_rescue_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -97,7 +90,7 @@ class RescueServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.rescue')
- def test_rescue_sevrer_overridden_policy_pass_with_same_user(
+ def test_rescue_server_overridden_policy_pass_with_same_user(
self, mock_rescue):
rule_name = rs_policies.BASE_POLICY_NAME
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -106,6 +99,27 @@ class RescueServerPolicyTest(base.BasePolicyTest):
body={'rescue': {}})
+class RescueServerNoLegacyNoScopePolicyTest(RescueServerPolicyTest):
+ """Test rescue/unrescue server APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ rs_policies.UNRESCUE_POLICY_NAME:
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ rs_policies.BASE_POLICY_NAME:
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(RescueServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to rescue/unrescue the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
"""Test Rescue Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -119,9 +133,13 @@ class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
def setUp(self):
super(RescueServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to
+ # rescue/unrescue the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class RescueServerNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
+class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
"""Test Rescue Server APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -129,23 +147,13 @@ class RescueServerNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(RescueServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to
- # rescue/unrescue the sevrer
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to rescue/unrescue
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(RescueServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only the project
+ # admin/member will be able to rescue/unrescue the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
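The rescue tests keep the overridden-policy cases that set the rule to user_id:%(user_id)s, so only the same user passes regardless of role. A standalone illustration of that oslo.policy check string; the policy name is shortened for the example:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(
        policy.RuleDefault('os-rescue', 'user_id:%(user_id)s'))

    creds = {'user_id': 'u1', 'project_id': 'p1'}
    # Passes: the target's user_id matches the credential's user_id.
    assert enforcer.enforce('os-rescue', {'user_id': 'u1'}, creds)
    # Fails for a different user's server.
    assert not enforcer.enforce('os-rescue', {'user_id': 'u2'}, creds)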
diff --git a/nova/tests/unit/policies/test_security_groups.py b/nova/tests/unit/policies/test_security_groups.py
index 5fb35f83a0..a9d2f484ba 100644
--- a/nova/tests/unit/policies/test_security_groups.py
+++ b/nova/tests/unit/policies/test_security_groups.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -48,62 +49,75 @@ class ServerSecurityGroupsPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to operate
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers, and any other role in the project (the
+ # legacy rule allows the server owner: same project_id, no role
+ # check) are able to operate
# server security groups.
- self.admin_or_owner_authorized_contexts = [
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to operate
- # server security groups.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
-
- self.reader_authorized_contexts = [
+ # With the legacy rule, any admin or project role is able to get
+ # their server's SGs.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
]
@mock.patch('nova.network.security_group_api.get_instance_security_groups')
def test_get_security_groups_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.network.security_group_api.add_to_instance')
def test_add_security_groups_policy(self, mock_add):
rule_name = policies.POLICY_NAME % 'add'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_ctr._addSecurityGroup,
- self.req, self.instance.uuid,
- body={'addSecurityGroup':
- {'name': 'fake'}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_ctr._addSecurityGroup,
+ self.req, self.instance.uuid,
+ body={'addSecurityGroup':
+ {'name': 'fake'}})
@mock.patch('nova.network.security_group_api.remove_from_instance')
def test_remove_security_groups_policy(self, mock_remove):
rule_name = policies.POLICY_NAME % 'remove'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_ctr._removeSecurityGroup,
- self.req, self.instance.uuid,
- body={'removeSecurityGroup':
- {'name': 'fake'}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_ctr._removeSecurityGroup,
+ self.req, self.instance.uuid,
+ body={'removeSecurityGroup':
+ {'name': 'fake'}})
+
+
+class ServerSecurityGroupsNoLegacyNoScopePolicyTest(
+ ServerSecurityGroupsPolicyTest):
+ """Test Server Security Groups server APIs policies with no legacy
+ deprecated rules and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'add':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'remove':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(ServerSecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only the project admin or member will be
+ # able to add/remove an SG on a server, and the reader to get SGs.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SecurityGroupsPolicyTest(base.BasePolicyTest):
@@ -120,14 +134,23 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
self.rule_ctr = security_groups.SecurityGroupRulesController()
self.req = fakes.HTTPRequest.blank('')
- # Check that everyone is able to perform crud operation on.
- # security groups.
+ # With legacy rules and scope checks disabled, everyone is able to
+ # perform CRUD operations on security groups.
# NOTE(gmann): Nova cannot verify the security groups owner during
# nova policy enforcement so will be passing context's project_id
# as target to policy and always pass. If requester is not admin
# or owner of security groups then neutron will be returning the
# appropriate error.
- self.reader_authorized_contexts = [
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -136,29 +159,22 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.reader_unauthorized_contexts = []
- self.sys_admin_or_owner_authorized_contexts = (
- self.reader_authorized_contexts)
- self.sys_admin_or_owner_unauthorized_contexts = (
- self.reader_unauthorized_contexts)
@mock.patch('nova.network.security_group_api.list')
def test_list_security_groups_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'get'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
@mock.patch('nova.network.security_group_api.get')
def test_show_security_groups_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.security_group_api.get')
@mock.patch('nova.network.security_group_api.update_security_group')
@@ -167,11 +183,10 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
body = {'security_group': {
'name': 'test',
'description': 'test-desc'}}
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, uuids.fake_id, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, uuids.fake_id, body=body)
@mock.patch('nova.network.security_group_api.create_security_group')
def test_create_security_groups_policy(self, mock_create):
@@ -179,21 +194,19 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
body = {'security_group': {
'name': 'test',
'description': 'test-desc'}}
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.network.security_group_api.get')
@mock.patch('nova.network.security_group_api.destroy')
def test_delete_security_groups_policy(self, mock_destroy, mock_get):
rule_name = policies.POLICY_NAME % 'delete'
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.security_group_api.get')
@mock.patch('nova.network.security_group_api.create_security_group_rule')
@@ -202,12 +215,11 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
body = {'security_group_rule': {
'ip_protocol': 'test', 'group_id': uuids.fake_id,
'parent_group_id': uuids.fake_id,
- 'from_port': 22, 'from_port': 22}}
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.rule_ctr.create,
- self.req, body=body)
+ 'from_port': 22}}
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.rule_ctr.create,
+ self.req, body=body)
@mock.patch('nova.network.security_group_api.get_rule')
@mock.patch('nova.network.security_group_api.get')
@@ -215,11 +227,52 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
def test_delete_security_group_rules_policy(self, mock_remove, mock_get,
mock_rules):
rule_name = policies.POLICY_NAME % 'rule:delete'
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.rule_ctr.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.rule_ctr.delete,
+ self.req, uuids.fake_id)
+
+
+class SecurityGroupsNoLegacyNoScopePolicyTest(
+ SecurityGroupsPolicyTest):
+ """Test Security Groups APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'get':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'update':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'rule:create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'rule:delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(SecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, project roles other than reader/member (e.g.
+ # foo) will not be able to operate on SGs.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class SecurityGroupsScopeTypePolicyTest(SecurityGroupsPolicyTest):
@@ -235,6 +288,20 @@ class SecurityGroupsScopeTypePolicyTest(SecurityGroupsPolicyTest):
def setUp(self):
super(SecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users will not be able to
+ # operate on SG.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
@@ -251,102 +318,71 @@ class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
def setUp(self):
super(ServerSecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system users.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerSecurityGroupsNoLegacyPolicyTest(
+class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
ServerSecurityGroupsScopeTypePolicyTest):
"""Test Security Groups APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(ServerSecurityGroupsNoLegacyPolicyTest, self).setUp()
-
- # Check that system or projct admin or owner is able to operate
- # server security groups.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to operate
- # server security groups.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context]
-
- # Check that system reader or projct is able to get
- # server security groups.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to get
- # server security groups.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerSecurityGroupsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope enabled and no legacy rules, only project admin/member
+ # will be able to add/remove an SG to/from their server, and reader
+ # will be able to get the server's SGs.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
"""Test Security Groups APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyPolicyTest, self).setUp()
-
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
+ # With no legacy rules and scope enabled, system users and other
+ # project roles like foo will not be able to operate on SG.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.reader_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- self.sys_admin_or_owner_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.project_admin_context, self.project_member_context,
- self.legacy_admin_context, self.other_project_member_context
- ]
- self.sys_admin_or_owner_unauthorized_contexts = [
- self.system_reader_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context, self.other_project_reader_context
+ self.other_project_member_context
]
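
The pattern repeated across every file in this series replaces common_policy_check(authorized, unauthorized, ...) with common_policy_auth(authorized, ...): tests now list only the authorized contexts, and the helper apparently derives the unauthorized set as the complement over all contexts the suite exercises. A minimal standalone sketch of that idea, with illustrative names rather than nova's actual base.BasePolicyTest code:

    # Sketch: derive the unauthorized set as the complement of the
    # authorized one over all contexts the policy tests exercise.
    # Hypothetical helper, not nova's implementation.
    ALL_CONTEXTS = frozenset({
        'legacy_admin', 'system_admin', 'system_member', 'system_reader',
        'system_foo', 'project_admin', 'project_member', 'project_reader',
        'project_foo', 'other_project_member', 'other_project_reader',
    })

    def split_contexts(authorized):
        """Return (authorized, unauthorized) context sets."""
        authorized = frozenset(authorized)
        return authorized, ALL_CONTEXTS - authorized

    auth, unauth = split_contexts({'legacy_admin', 'project_admin'})
    assert 'system_reader' in unauth and 'legacy_admin' not in unauth

Dropping the hand-maintained unauthorized lists is what removes most of the setUp() bulk in the hunks above.
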
diff --git a/nova/tests/unit/policies/test_server_diagnostics.py b/nova/tests/unit/policies/test_server_diagnostics.py
index 04a099a7a3..4a4b192baa 100644
--- a/nova/tests/unit/policies/test_server_diagnostics.py
+++ b/nova/tests/unit/policies/test_server_diagnostics.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -46,26 +47,24 @@ class ServerDiagnosticsPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to get server diagnostics.
- self.admin_authorized_contexts = [
+ # With legacy rule, any admin is able to get server diagnostics.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context
- ]
- # Check that non-admin is not able to get server diagnostics.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
def test_server_diagnostics_policy(self):
rule_name = policies.BASE_POLICY_NAME
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, self.instance.uuid)
+
+
+class ServerDiagnosticsNoLegacyNoScopeTest(ServerDiagnosticsPolicyTest):
+ """Test Server Diagnostics API policies with deprecated rules
+ disabled and scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
@@ -82,33 +81,21 @@ class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
def setUp(self):
super(ServerDiagnosticsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ServerDiagnosticsNoLegacyPolicyTest(
+class ServerDiagnosticsScopeTypeNoLegacyPolicyTest(
ServerDiagnosticsScopeTypePolicyTest):
"""Test Server Diagnostics APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsNoLegacyPolicyTest, self).setUp()
- # Check that system admin is able to get server diagnostics.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non system admin is not able to get server diagnostics.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
-
-class ServerDiagnosticsOverridePolicyTest(ServerDiagnosticsNoLegacyPolicyTest):
+
+class ServerDiagnosticsOverridePolicyTest(
+ ServerDiagnosticsScopeTypeNoLegacyPolicyTest):
"""Test Server Diagnostics APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -119,22 +106,12 @@ class ServerDiagnosticsOverridePolicyTest(ServerDiagnosticsNoLegacyPolicyTest):
super(ServerDiagnosticsOverridePolicyTest, self).setUp()
rule = policies.BASE_POLICY_NAME
# NOTE(gmann): override the rule to project member and verify it
- # work as policy is system and projct scoped.
+ # work as policy is project scoped.
self.policy.set_rules({
- rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+ rule: base_policy.PROJECT_MEMBER},
overwrite=False)
- # Check that system admin or project scoped role as override above
+ # Check that the project member role overridden above
# is able to get server diagnostics.
- self.admin_authorized_contexts = [
- self.system_admin_context,
+ self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # get server diagnostics.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
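
ServerDiagnosticsOverridePolicyTest above relies on self.policy.set_rules(..., overwrite=False). With stock oslo.policy, overwrite=False merges the supplied rules into the enforcer instead of replacing the whole rule set, which is how a single rule can be overridden while everything else keeps its default. A self-contained sketch using only public oslo.policy APIs (the rule names here are invented):

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.ConfigOpts())
    enforcer.set_rules(policy.Rules.from_dict({
        'diagnostics': 'role:admin',
        'migrations': 'role:admin',
    }))
    # overwrite=False merges: 'diagnostics' is overridden, 'migrations'
    # keeps its admin-only check.
    enforcer.set_rules(
        policy.Rules.from_dict(
            {'diagnostics': 'role:member and project_id:%(project_id)s'}),
        overwrite=False)

    member = {'roles': ['member'], 'project_id': 'p1'}
    print(enforcer.enforce('diagnostics', {'project_id': 'p1'}, member))  # True
    print(enforcer.enforce('migrations', {}, member))                     # False
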
diff --git a/nova/tests/unit/policies/test_server_external_events.py b/nova/tests/unit/policies/test_server_external_events.py
index f8f1bcd663..401b55325f 100644
--- a/nova/tests/unit/policies/test_server_external_events.py
+++ b/nova/tests/unit/policies/test_server_external_events.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_external_events as ev
@@ -33,20 +34,12 @@ class ServerExternalEventsPolicyTest(base.BasePolicyTest):
self.controller = ev.ServerExternalEventsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to create the server external events.
- self.admin_authorized_contexts = [
+ # With legacy rule and no scope checks, any admin can
+ # create the server external events.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context
]
- # Check that non-admin is not able to create the server
- # external events.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.external_instance_event')
@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids')
@@ -58,10 +51,18 @@ class ServerExternalEventsPolicyTest(base.BasePolicyTest):
'server_uuid': uuids.fake_id,
'status': 'completed'}]
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, body=body)
+
+
+class ServerExternalEventsNoLegacyNoScopeTest(
+ ServerExternalEventsPolicyTest):
+ """Test Server External Events API policies with deprecated rules
+ disabled and scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ServerExternalEventsScopeTypePolicyTest(ServerExternalEventsPolicyTest):
@@ -79,23 +80,12 @@ class ServerExternalEventsScopeTypePolicyTest(ServerExternalEventsPolicyTest):
super(ServerExternalEventsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that admin is able to create the server external events.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- ]
- # Check that non-admin is not able to create the server
- # external events.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope checks, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ServerExternalEventsNoLegacyPolicyTest(
+class ServerExternalEventsScopeTypeNoLegacyPolicyTest(
ServerExternalEventsScopeTypePolicyTest):
"""Test Server External Events APIs policies with system scope enabled,
and no more deprecated rules.
diff --git a/nova/tests/unit/policies/test_server_groups.py b/nova/tests/unit/policies/test_server_groups.py
index 0c8b3de0cd..b0df7ccb89 100644
--- a/nova/tests/unit/policies/test_server_groups.py
+++ b/nova/tests/unit/policies/test_server_groups.py
@@ -9,17 +9,22 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_groups
+import nova.conf
from nova import objects
from nova.policies import server_groups as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class ServerGroupPolicyTest(base.BasePolicyTest):
"""Test Server Groups APIs policies with all possible context.
@@ -45,98 +50,85 @@ class ServerGroupPolicyTest(base.BasePolicyTest):
user_id='u2', policies=[], members=[])]
self.mock_get.return_value = self.sg[0]
- # Check that admin or and owner is able to delete
- # the server group.
- self.admin_or_owner_authorized_contexts = [
+ # With legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the SG owner - same project id, no role check) are able to
+ # delete and get SG.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to delete
- # the server group.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
+ self.project_reader_context, self.project_foo_context,
]
- # Check that system reader or owner is able to get
- # the server group. Due to old default everyone
- # is allowed to perform this operation.
- self.system_reader_or_owner_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context, self.project_foo_context
- ]
- self.system_reader_or_owner_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
+ self.project_reader_context, self.project_foo_context,
]
- # Check that everyone is able to list
- # theie own server group. Due to old defaults everyone
- # is able to list their server groups.
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # System admin, legacy admin, and project admin are able to get
+ # all projects' SG.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+
+ # Listing SG cannot check the project id, so everyone is allowed.
self.everyone_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = [
- ]
- # Check that project member is able to create server group.
- # Due to old defaults everyone is able to list their server groups.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.system_member_context, self.project_reader_context,
- self.project_foo_context, self.system_reader_context,
+ self.system_member_context, self.system_reader_context,
self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
+ self.other_project_member_context
]
- self.project_member_unauthorized_contexts = []
+
+ # With legacy rule, anyone can create SG.
+ self.project_create_authorized_contexts = (
+ self.everyone_authorized_contexts)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
def test_index_server_groups_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
@mock.patch('nova.objects.InstanceGroupList.get_all')
- def test_index_all_project_server_groups_policy(self, mock_get_all):
+ @mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
+ def test_index_all_project_server_groups_policy(self, mock_get,
+ mock_get_all):
mock_get_all.return_value = objects.InstanceGroupList(objects=self.sg)
+ mock_get.return_value = objects.InstanceGroupList(
+ objects=[self.sg[0]])
# 'index' policy is checked before 'index:all_projects' so
# we have to allow it for everyone otherwise it will fail for
# unauthorized contexts here.
rule = policies.POLICY_ROOT % 'index'
self.policy.set_rules({rule: "@"}, overwrite=False)
- admin_req = fakes.HTTPRequest.blank(
- '/os-server-groups?all_projects=True',
- version='2.13', use_admin_context=True)
- # Check admin user get all projects server groups.
- resp = self.controller.index(admin_req)
- projs = [sg['project_id'] for sg in resp['server_groups']]
- self.assertEqual(2, len(projs))
- self.assertIn('proj2', projs)
- # Check non-admin user does not get all projects server groups.
- req = fakes.HTTPRequest.blank('/os-server-groups?all_projects=True',
- version='2.13')
- resp = self.controller.index(req)
- projs = [sg['project_id'] for sg in resp['server_groups']]
- self.assertNotIn('proj2', projs)
+ rule_name = policies.POLICY_ROOT % 'index:all_projects'
+ req = fakes.HTTPRequest.blank('?all_projects', version='2.13')
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(base.rule_if_system,
+ rule, rule_name)
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
+ check_rule, self.controller.index,
+ req, fatal=False)
+ for resp in authorize_res:
+ projs = [sg['project_id'] for sg in resp['server_groups']]
+ self.assertEqual(2, len(projs))
+ self.assertIn('proj2', projs)
+ for resp in unauthorize_res:
+ projs = [sg['project_id'] for sg in resp['server_groups']]
+ self.assertNotIn('proj2', projs)
def test_show_server_groups_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(
- self.system_reader_or_owner_authorized_contexts,
- self.system_reader_or_owner_unauthorized_contexts,
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name,
self.controller.show,
self.req, uuids.fake_id)
@@ -146,20 +138,54 @@ class ServerGroupPolicyTest(base.BasePolicyTest):
rule_name = policies.POLICY_ROOT % 'create'
body = {'server_group': {'name': 'fake',
'policies': ['affinity']}}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_create_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.objects.InstanceGroup.destroy')
def test_delete_server_groups_policy(self, mock_destroy):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, uuids.fake_id)
+
+
+class ServerGroupNoLegacyNoScopePolicyTest(ServerGroupPolicyTest):
+ """Test Server Groups APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerGroupNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, only project admin/member will be able to
+ # delete the SG, and reader will be able to get the SG.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+ # Even with no legacy rule, legacy admin is allowed to create an SG
+ # using the requesting context's project_id. The same applies to
+ # listing SG.
+ self.project_create_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context, self.other_project_member_context]
+
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
@@ -176,27 +202,31 @@ class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
super(ServerGroupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check if project scoped can create the server group.
- self.project_member_authorized_contexts = [
+ # With scope enabled, system users are disallowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+
+ self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_member_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context]
+
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
+ self.other_project_member_context
]
- # Check if non-project scoped cannot create the server group.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context
- ]
-
- # TODO(gmann): Test this with system scope once we remove
- # the hardcoded admin check
- def test_index_all_project_server_groups_policy(self):
- pass
-class ServerGroupNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
+class ServerGroupScopeTypeNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
"""Test Server Group APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -204,58 +234,25 @@ class ServerGroupNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerGroupNoLegacyPolicyTest, self).setUp()
+ super(ServerGroupScopeTypeNoLegacyPolicyTest, self).setUp()
+
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+
+ self.project_create_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context]
+
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
+
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
- # Check that system admin or and owner is able to delete
- # the server group.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system admin/owner is not able to delete
- # the server group.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader or owner is able to get
- # the server group.
- self.system_reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context
- ]
- self.system_reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- # Check if project member can create the server group.
- self.project_member_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.other_project_member_context
- ]
- # Check if non-project member cannot create the server group.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_reader_context,
- self.project_foo_context,
+ self.project_member_context, self.project_reader_context,
self.other_project_reader_context,
+ self.other_project_member_context
]
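
The reworked test_index_all_project_server_groups_policy above passes functools.partial(base.rule_if_system, rule, rule_name) as the rule when scope enforcement is on, so the rule actually checked can depend on the requesting context. A hedged guess at what such a helper looks like, with the signature inferred from the partial call rather than taken from nova's code:

    import functools

    def rule_if_system(rule, rule_name, context):
        """Pick the all-projects rule for system-scoped requests,
        the plain rule otherwise (hypothetical, signature inferred)."""
        if getattr(context, 'system_scope', None):
            return rule_name
        return rule

    check_rule = functools.partial(
        rule_if_system,
        'os_compute_api:os-server-groups:index',
        'os_compute_api:os-server-groups:index:all_projects')

    class SystemContext:
        system_scope = 'all'

    print(check_rule(SystemContext()))  # the all_projects rule is chosen
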
diff --git a/nova/tests/unit/policies/test_server_ips.py b/nova/tests/unit/policies/test_server_ips.py
index 29bd2d81c4..b837d2d0e2 100644
--- a/nova/tests/unit/policies/test_server_ips.py
+++ b/nova/tests/unit/policies/test_server_ips.py
@@ -49,37 +49,43 @@ class ServerIpsPolicyTest(base.BasePolicyTest):
self.mock_get_network.return_value = {'net1':
{'ips': '', 'floating_ips': ''}}
- # Check that admin or and server owner is able to get server
- # IP addresses.
- self.reader_or_owner_authorized_contexts = [
+ # With legacy rule, any admin or project role is able to get their
+ # server IP addresses.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-admin/owner is not able to get the server IP
- # adderesses
- self.reader_or_owner_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
]
def test_index_ips_policy(self):
rule_name = ips_policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
def test_show_ips_policy(self):
rule_name = ips_policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.instance.uuid,
- 'net1')
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid,
+ 'net1')
+
+
+class ServerIpsNoLegacyNoScopePolicyTest(ServerIpsPolicyTest):
+ """Test Server Ips APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerIpsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy, only project admin, member, and reader will be able
+ # to get their server IP addresses.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
@@ -95,28 +101,21 @@ class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
def setUp(self):
super(ServerIpsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users will not be able
+ # to get the server IP addresses.
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerIpsNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
+class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
"""Test Server IPs APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(ServerIpsNoLegacyPolicyTest, self).setUp()
-
- # Check that system reader or owner is able to
- # get the server IP adderesses.
- self.reader_or_owner_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
- # Check that non-system and non-owner is not able to
- # get the server IP adderesses.
- self.reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(ServerIpsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope enabled, only project admin, member,
+ # and reader will be able to get their server IP addresses.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
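
Several setUp() bodies above collapse into shared attributes such as project_reader_or_admin_with_scope_no_legacy. Judging from the contexts that keep recurring in this diff, those are pre-built combinations on the base test class; a rough sketch of how they could be composed (contents inferred from this diff, not copied from nova.tests.unit.policies.base):

    # Hypothetical composition of the shared authorized-context lists;
    # the member list drops the reader-only context from the reader list.
    class BasePolicyTestSketch:
        def setUp(self):
            self.project_reader_or_admin_with_scope_no_legacy = [
                'legacy_admin_context', 'project_admin_context',
                'project_member_context', 'project_reader_context',
            ]
            self.project_member_or_admin_with_scope_no_legacy = [
                ctx for ctx in
                self.project_reader_or_admin_with_scope_no_legacy
                if ctx != 'project_reader_context'
            ]

    t = BasePolicyTestSketch()
    t.setUp()
    print(t.project_member_or_admin_with_scope_no_legacy)
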
diff --git a/nova/tests/unit/policies/test_server_metadata.py b/nova/tests/unit/policies/test_server_metadata.py
index 89c6480adc..cf4fb19e7b 100644
--- a/nova/tests/unit/policies/test_server_metadata.py
+++ b/nova/tests/unit/policies/test_server_metadata.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_metadata
@@ -40,92 +41,88 @@ class ServerMetadataPolicyTest(base.BasePolicyTest):
id=1, uuid=uuids.fake_id, project_id=self.project_id)
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to CRUD
- # the server metadata.
- self.admin_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to CRUD
- # the server metadata
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that admin or and server owner is able to get
- # the server metadata.
- self.reader_authorized_contexts = [
+ # With legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the server owner - same project id, no role check) are able
+ # to create, update, and delete the server metadata.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_member_context, self.system_reader_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get
- # the server metadata.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # and they can get their own server metadata.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.compute.api.API.get_instance_metadata')
def test_index_server_Metadata_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.compute.api.API.get_instance_metadata')
def test_show_server_Metadata_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
mock_get.return_value = {'key9': 'value'}
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.instance.uuid, 'key9')
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid, 'key9')
@mock.patch('nova.compute.api.API.update_instance_metadata')
def test_create_server_Metadata_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'create'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, self.instance.uuid,
- body={"metadata": {"key9": "value9"}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, self.instance.uuid,
+ body={"metadata": {"key9": "value9"}})
@mock.patch('nova.compute.api.API.update_instance_metadata')
def test_update_server_Metadata_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'update'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.instance.uuid, 'key9',
- body={"meta": {"key9": "value9"}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, 'key9',
+ body={"meta": {"key9": "value9"}})
@mock.patch('nova.compute.api.API.update_instance_metadata')
def test_update_all_server_Metadata_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'update_all'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update_all,
- self.req, self.instance.uuid,
- body={"metadata": {"key9": "value9"}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update_all,
+ self.req, self.instance.uuid,
+ body={"metadata": {"key9": "value9"}})
@mock.patch('nova.compute.api.API.get_instance_metadata')
@mock.patch('nova.compute.api.API.delete_instance_metadata')
def test_delete_server_Metadata_policy(self, mock_delete, mock_get):
rule_name = policies.POLICY_ROOT % 'delete'
mock_get.return_value = {'key9': 'value'}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.instance.uuid, 'key9')
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid, 'key9')
+
+
+class ServerMetadataNoLegacyNoScopePolicyTest(ServerMetadataPolicyTest):
+ """Test Server Metadata APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerMetadataNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, legacy admin loses its extra power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
@@ -141,9 +138,15 @@ class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
def setUp(self):
super(ServerMetadataScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerMetadataNoLegacyPolicyTest(ServerMetadataScopeTypePolicyTest):
+class ServerMetadataScopeTypeNoLegacyPolicyTest(
+ ServerMetadataScopeTypePolicyTest):
"""Test Server Metadata APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -151,32 +154,10 @@ class ServerMetadataNoLegacyPolicyTest(ServerMetadataScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerMetadataNoLegacyPolicyTest, self).setUp()
- # Check that system admin or project member is able to create, update
- # and delete the server metadata.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context, self.project_admin_context,
- self.project_member_context]
- # Check that non-system/admin/member is not able to create, update
- # and delete the server metadata.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_reader_context,
- self.system_foo_context, self.system_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system admin or project member is able to
- # get the server metadata.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- # Check that non-system/admin/member is not able to
- # get the server metadata.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerMetadataScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope enabled, only project admin, member,
+ # and reader will be allowed to operate on server metadata.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
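
Every ScopeTypePolicyTest in this series flips self.flags(enforce_scope=True, group="oslo_policy"), after which the comments note that system users are no longer allowed. That matches oslo.policy's scope_types enforcement: a project-scoped rule rejects a system-scoped token outright, even for an admin. A standalone sketch with plain oslo.policy (the rule name is invented for illustration):

    from oslo_config import cfg
    from oslo_policy import policy

    conf = cfg.ConfigOpts()
    enforcer = policy.Enforcer(conf)
    conf.set_override('enforce_scope', True, group='oslo_policy')
    enforcer.register_default(
        policy.RuleDefault('compute:live_migrate', 'role:admin',
                           scope_types=['project']))

    system_admin = {'roles': ['admin'], 'system_scope': 'all'}
    try:
        enforcer.enforce('compute:live_migrate', {}, system_admin,
                         do_raise=True)
    except policy.InvalidScope:
        print('system admin rejected once scope enforcement is on')
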
diff --git a/nova/tests/unit/policies/test_server_migrations.py b/nova/tests/unit/policies/test_server_migrations.py
index b06d9ec167..b17d4ded1d 100644
--- a/nova/tests/unit/policies/test_server_migrations.py
+++ b/nova/tests/unit/policies/test_server_migrations.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+import fixtures
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_migrations
@@ -45,42 +45,18 @@ class ServerMigrationsPolicyTest(base.BasePolicyTest):
vm_state=vm_states.ACTIVE)
self.mock_get.return_value = self.instance
- # Check that admin is able to perform operations
+ # With legacy rule, any admin is able to perform operations
# for server migrations.
- self.admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to perform operations
- # for server migrations.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
- # Check that system-reader are able to perform operations
- # for server migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
- self.project_admin_context]
- # Check that non-system-reader are not able to perform operations
- # for server migrations.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.get_migrations_in_progress_by_instance')
def test_list_server_migrations_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.api.openstack.compute.server_migrations.output')
@mock.patch('nova.compute.api.API.get_migration_by_id_and_instance')
@@ -90,27 +66,32 @@ class ServerMigrationsPolicyTest(base.BasePolicyTest):
migration_type='live-migration',
status='running',
)
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, self.instance.uuid, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, self.instance.uuid, 11111)
@mock.patch('nova.compute.api.API.live_migrate_abort')
def test_delete_server_migrations_policy(self, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, self.instance.uuid, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, self.instance.uuid, 11111)
@mock.patch('nova.compute.api.API.live_migrate_force_complete')
def test_force_delete_server_migrations_policy(self, mock_force):
rule_name = policies.POLICY_ROOT % 'force_complete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._force_complete,
- self.req, self.instance.uuid, 11111,
- body={"force_complete": None})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._force_complete,
+ self.req, self.instance.uuid, 11111,
+ body={"force_complete": None})
+
+
+class ServerMigrationsNoLegacyNoScopeTest(ServerMigrationsPolicyTest):
+ """Test Server Migrations API policies with deprecated rules
+ disabled and scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
@@ -126,48 +107,21 @@ class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
def setUp(self):
super(ServerMigrationsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ServerMigrationsNoLegacyPolicyTest(ServerMigrationsScopeTypePolicyTest):
+class ServerMigrationsScopeTypeNoLegacyPolicyTest(
+ ServerMigrationsScopeTypePolicyTest):
"""Test Server Migrations APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsNoLegacyPolicyTest, self).setUp()
- # Check that admin is able to perform operations
- # for server migrations.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non-admin is not able to perform operations
- # for server migrations.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to perform operations
- # for server migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to perform operations
- # for server migrations.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-class ServerMigrationsOverridePolicyTest(ServerMigrationsNoLegacyPolicyTest):
+class ServerMigrationsOverridePolicyTest(
+ ServerMigrationsScopeTypeNoLegacyPolicyTest):
"""Test Server Migrations APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -181,38 +135,16 @@ class ServerMigrationsOverridePolicyTest(ServerMigrationsNoLegacyPolicyTest):
rule_force = policies.POLICY_ROOT % 'force_complete'
rule_delete = policies.POLICY_ROOT % 'delete'
# NOTE(gmann): override the rule to project member and verify it
- # work as policy is system and projct scoped.
+ # work as policy is project scoped.
self.policy.set_rules({
- rule_show: base_policy.PROJECT_READER_OR_SYSTEM_READER,
- rule_list: base_policy.PROJECT_READER_OR_SYSTEM_READER,
- rule_force: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
- rule_delete: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+ rule_show: base_policy.PROJECT_READER,
+ rule_list: base_policy.PROJECT_READER,
+ rule_force: base_policy.PROJECT_READER,
+ rule_delete: base_policy.PROJECT_READER},
overwrite=False)
- # Check that system admin or project scoped role as override above
+ # Check that the project reader role overridden above
# is able to migrate the server
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # migrate the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to perform operations
- # for server migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
- # Check that non-system-reader is not able to perform operations
- # for server migrations.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_authorized_contexts = [
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context]
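
The without_deprecated_rules and rules_without_deprecation knobs used throughout correspond to oslo.policy's rule-deprecation mechanism: while a DeprecatedRule is attached to a default, enforcement effectively checks "new default OR old default", which is exactly the legacy-admin fallback these tests toggle off. A sketch of how such a pair is declared; the check strings, reason, and version are illustrative, not nova's exact definitions:

    from oslo_policy import policy

    legacy = policy.DeprecatedRule(
        name='os_compute_api:os-server-password',
        check_str='rule:admin_or_owner',
        deprecated_reason='password policies moved to the member role',
        deprecated_since='21.0.0')

    new_default = policy.DocumentedRuleDefault(
        name='os_compute_api:os-server-password',
        check_str='role:member and project_id:%(project_id)s',
        description='Clear the password for a server.',
        operations=[{'path': '/servers/{server_id}/os-server-password',
                     'method': 'DELETE'}],
        deprecated_rule=legacy)

    # While the DeprecatedRule is attached, the old check remains a
    # fallback; rules_without_deprecation presumably supplies plain
    # replacements without it (inferred from the tests above).
    print(new_default.check_str, '|', legacy.check_str)
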
diff --git a/nova/tests/unit/policies/test_server_password.py b/nova/tests/unit/policies/test_server_password.py
index 1a28cf9f20..b163c6c562 100644
--- a/nova/tests/unit/policies/test_server_password.py
+++ b/nova/tests/unit/policies/test_server_password.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_password
@@ -41,51 +42,55 @@ class ServerPasswordPolicyTest(base.BasePolicyTest):
id=1, uuid=uuids.fake_id, project_id=self.project_id,
system_metadata={}, expected_attrs=['system_metadata'])
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to
- # delete the server password.
- self.admin_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to delete
- # the server password.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that admin or and server owner is able to get
- # the server password.
- self.reader_authorized_contexts = [
+ # With legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the server owner - same project id, no role check) are able
+ # to delete the server password.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_member_context, self.system_reader_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get
- # the server password.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # and they can get their own server password.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.api.metadata.password.extract_password')
def test_index_server_password_policy(self, mock_pass):
rule_name = policies.BASE_POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.api.metadata.password.convert_password')
def test_clear_server_password_policy(self, mock_pass):
rule_name = policies.BASE_POLICY_NAME % 'clear'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.clear,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.clear,
+ self.req, self.instance.uuid)
+
+
+class ServerPasswordNoLegacyNoScopePolicyTest(ServerPasswordPolicyTest):
+ """Test Server Password APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.BASE_POLICY_NAME % 'clear':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(ServerPasswordNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, legacy admin loses its extra power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
@@ -101,50 +106,30 @@ class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
def setUp(self):
super(ServerPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerPasswordNoLegacyPolicyTest(ServerPasswordScopeTypePolicyTest):
+class ServerPasswordScopeTypeNoLegacyPolicyTest(
+ ServerPasswordScopeTypePolicyTest):
"""Test Server Password APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(ServerPasswordNoLegacyPolicyTest, self).setUp()
-
- # Check that system or projct admin or owner is able to clear
- # server password.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to clear
- # server password.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context]
-
- # Check that system reader or projct owner is able to get
- # server password.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to get
- # server password.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerPasswordScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope enabled, only project admin, member,
+ # and reader will be allowed to operate on server password.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
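
Finally, the import churn at the top of each file ("-import mock" / "+from unittest import mock") swaps the third-party mock backport for the standard library module available since Python 3.3; the API is unchanged, so only the imports move. For instance:

    import os
    from unittest import mock

    # stdlib mock is a drop-in replacement for the old external package.
    with mock.patch('os.getcwd', return_value='/tmp'):
        print(os.getcwd())  # -> /tmp
    print(os.getcwd())      # original behavior restored
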
diff --git a/nova/tests/unit/policies/test_server_tags.py b/nova/tests/unit/policies/test_server_tags.py
index b7efe86364..412177408c 100644
--- a/nova/tests/unit/policies/test_server_tags.py
+++ b/nova/tests/unit/policies/test_server_tags.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_tags
@@ -50,51 +51,32 @@ class ServerTagsPolicyTest(base.BasePolicyTest):
self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid',
lambda s, c, u: inst_map)
- # Check that admin or and server owner is able to perform
+ # With legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the server owner - same project id, no role check) are able
+ # to perform
# operations on server tags.
- self.admin_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin/owner is not able to perform operations
- # on server tags
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that reader or and server owner is able to perform operations
- # on server tags.
- self.reader_or_owner_authorized_contexts = [
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_member_context, self.system_reader_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-reader/owner is not able to perform operations
- # on server tags.
- self.reader_or_owner_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.objects.TagList.get_by_resource_id')
def test_index_server_tags_policy(self, mock_tag):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
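+        # common_policy_auth (assumed to be a BasePolicyTest helper)
+        # takes only the authorized contexts and derives the
+        # unauthorized ones as the complement over all contexts,
+        # replacing the old common_policy_check(authorized,
+        # unauthorized, ...) pattern.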
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.objects.Tag.exists')
def test_show_server_tags_policy(self, mock_exists):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.instance.uuid, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid, uuids.fake_id)
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.db.main.api.instance_tag_get_by_instance_uuid')
@@ -102,33 +84,30 @@ class ServerTagsPolicyTest(base.BasePolicyTest):
def test_update_server_tags_policy(self, mock_create, mock_tag,
mock_notf):
rule_name = policies.POLICY_ROOT % 'update'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.instance.uuid, uuids.fake_id,
- body=None)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, uuids.fake_id,
+ body=None)
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.db.main.api.instance_tag_set')
def test_update_all_server_tags_policy(self, mock_set, mock_notf):
rule_name = policies.POLICY_ROOT % 'update_all'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update_all,
- self.req, self.instance.uuid,
- body={'tags': ['tag1', 'tag2']})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update_all,
+ self.req, self.instance.uuid,
+ body={'tags': ['tag1', 'tag2']})
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.objects.TagList.destroy')
def test_delete_all_server_tags_policy(self, mock_destroy, mock_notf):
rule_name = policies.POLICY_ROOT % 'delete_all'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete_all,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete_all,
+ self.req, self.instance.uuid)
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.db.main.api.instance_tag_get_by_instance_uuid')
@@ -136,11 +115,27 @@ class ServerTagsPolicyTest(base.BasePolicyTest):
def test_delete_server_tags_policy(self, mock_destroy, mock_get,
mock_notf):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.instance.uuid, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid, uuids.fake_id)
+
+
+class ServerTagsNoLegacyNoScopePolicyTest(ServerTagsPolicyTest):
+ """Test Server Tags APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerTagsNoLegacyNoScopePolicyTest, self).setUp()
+        # With no legacy rules, the legacy admin loses power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
@@ -156,9 +151,14 @@ class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
def setUp(self):
super(ServerTagsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+        # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerTagsNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
+class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
"""Test Server Tags APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -166,32 +166,10 @@ class ServerTagsNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerTagsNoLegacyPolicyTest, self).setUp()
- # Check that system admin or project member is able to
- # perform operations on server tags.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context, self.project_admin_context,
- self.project_member_context]
- # Check that non-system/admin/member is not able to
- # perform operations on server tags.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_reader_context,
- self.system_foo_context, self.system_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system reader or owner is able to
- # perform operations on server tags.
- self.reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- # Check that non-system/reader/owner is not able to
- # perform operations on server tags.
- self.reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerTagsScopeTypeNoLegacyPolicyTest, self).setUp()
+        # With no legacy rules and scope enabled, only project admin,
+        # member, and reader are allowed to operate on server tags.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_topology.py b/nova/tests/unit/policies/test_server_topology.py
index 51a3206a97..e2f81dfaad 100644
--- a/nova/tests/unit/policies/test_server_topology.py
+++ b/nova/tests/unit/policies/test_server_topology.py
@@ -51,40 +51,23 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
# Check that system reader or and server owner is able to get
# the server topology.
- self.system_reader_or_owner_authorized_contexts = [
+        # With legacy rules and no scope checks, all admins are able to
+        # get the server topology with host info.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-stem reader/owner is not able to get
- # the server topology.
- self.system_reader_or_owner_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to get the server topology
- # host information.
- self.system_reader_authorized_contexts = [
+ self.project_admin_context]
+        # And project readers can get their server topology, but
+        # without the host info.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get the server topology
- # host information.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
def test_index_server_topology_policy(self):
rule_name = policies.BASE_POLICY_NAME % 'index'
- self.common_policy_check(
- self.system_reader_or_owner_authorized_contexts,
- self.system_reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
def test_index_host_server_topology_policy(self):
rule_name = policies.BASE_POLICY_NAME % 'host:index'
@@ -93,9 +76,8 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
# fail first for unauthorized contexts.
rule = policies.BASE_POLICY_NAME % 'index'
self.policy.set_rules({rule: "@"}, overwrite=False)
- authorize_res, unauthorize_res = self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.index, self.req, self.instance.uuid,
fatal=False)
for resp in authorize_res:
@@ -106,6 +88,20 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
self.assertNotIn('cpu_pinning', resp['nodes'][0])
+class ServerTopologyNoLegacyNoScopePolicyTest(ServerTopologyPolicyTest):
+ """Test Server Topology APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerTopologyNoLegacyNoScopePolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+
class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
"""Test Server Topology APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -119,24 +115,15 @@ class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
def setUp(self):
super(ServerTopologyScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system reader is able to get the server topology
- # host information.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system/reader is not able to get the server topology
- # host information.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+        # With scope enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerTopologyNoLegacyPolicyTest(ServerTopologyScopeTypePolicyTest):
+class ServerTopologyScopeTypeNoLegacyPolicyTest(
+ ServerTopologyScopeTypePolicyTest):
"""Test Server Topology APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -144,18 +131,8 @@ class ServerTopologyNoLegacyPolicyTest(ServerTopologyScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerTopologyNoLegacyPolicyTest, self).setUp()
- # Check that system reader/owner is able to get
- # the server topology.
- self.system_reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.system_member_context, self.system_reader_context,
- self.project_reader_context]
- # Check that non-system/reader/owner is not able to get
- # the server topology.
- self.system_reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(ServerTopologyScopeTypeNoLegacyPolicyTest, self).setUp()
+        # With no legacy rules and scope enabled, only project admin,
+        # member, and reader are able to get the server topology.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
index 33aadb948f..eee1e4ba51 100644
--- a/nova/tests/unit/policies/test_servers.py
+++ b/nova/tests/unit/policies/test_servers.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -20,12 +21,14 @@ from nova.api.openstack.compute import migrate_server
from nova.api.openstack.compute import servers
from nova.compute import api as compute
from nova.compute import vm_states
+import nova.conf
from nova import exception
from nova.network import model
from nova.network import neutron
from nova import objects
from nova.objects import fields
from nova.objects.instance_group import InstanceGroup
+from nova.policies import base as base_policy
from nova.policies import extended_server_attributes as ea_policies
from nova.policies import servers as policies
from nova.tests.unit.api.openstack import fakes
@@ -33,6 +36,8 @@ from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class ServersPolicyTest(base.BasePolicyTest):
"""Test Servers APIs policies with all possible context.
@@ -114,137 +119,41 @@ class ServersPolicyTest(base.BasePolicyTest):
'OS-EXT-SRV-ATTR:user_data'
]
- # Check that admin or and owner is able to update, delete
- # or perform server action.
- self.admin_or_owner_authorized_contexts = [
+ # Users that can take action on *our* project resources
+ self.project_action_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to update, delete
- # or perform server action.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ self.project_reader_context, self.project_foo_context,
+ ])
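+        # (These are plain sets now, rather than lists, so the scope
+        # and no-legacy subclasses below can derive their variants with
+        # set arithmetic such as reduce_set and complements.)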
- # Check that system reader or owner is able to get
- # the server.
- self.system_reader_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context, self.project_foo_context
- ]
- self.system_reader_or_owner_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Users that can read *our* project resources
+ self.project_reader_authorized_contexts = (
+ self.project_action_authorized_contexts)
- # Check that everyone is able to list their own server.
- self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context]
- self.everyone_unauthorized_contexts = [
- ]
- # Check that admin is able to create server with host request
- # and get server extended attributes or host status.
- self.admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to create server with host request
- # and get server extended attributes or host status.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that sustem reader is able to list the server
- # for all projects.
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to list the server
- # for all projects.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that project member is able to create serve
- self.project_member_authorized_contexts = [
+ # Users that _see_ project-scoped resources that they own
+ self.everyone_authorized_contexts = set(self.all_contexts)
+
+ # Users that can _do_ things to project-scoped resources they own
+ self.project_member_authorized_contexts = set(self.all_contexts)
+
+ # Users able to do admin things on project resources
+ self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context, self.system_foo_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_reader_context]
- # Check that non-project member is not able to create server
- self.project_member_unauthorized_contexts = [
- ]
- # Check that project admin is able to create server with requested
- # destination.
- self.project_admin_authorized_contexts = [
+ self.project_admin_context])
+
+        # Admins for APIs that do not pass the project id as the policy
+        # target (for example, create server, or list servers in
+        # detail): they can list all projects' servers, create a server
+        # on a specific host, and so on. This is an admin on any
+        # project, because the policy does not check the project id,
+        # but they will still only be able to create servers in, and
+        # get servers of (unless the all-tenants policy allows more),
+        # their own project.
+ self.all_projects_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-project admin is not able to create server with
- # requested destination
- self.project_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that no one is able to resize cross cell.
+ self.project_admin_context])
+
+ # Users able to do cross-cell migrations
self.cross_cell_authorized_contexts = []
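+        # (empty on purpose: by default no context is allowed to resize
+        # across cells unless a deployment overrides the policy)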
- self.cross_cell_unauthorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context]
- # Check that admin is able to access the zero disk flavor
- # and external network policies.
- self.zero_disk_external_net_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to caccess the zero disk flavor
- # and external network policies.
- self.zero_disk_external_net_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that admin is able to get server extended attributes
- # or host status.
- self.server_attr_admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to get server extended attributes
- # or host status.
- self.server_attr_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
def test_index_server_policy(self):
@@ -261,9 +170,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
rule_name = policies.SERVERS % 'index'
- self.common_policy_check(
+ self.common_policy_auth(
self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
rule_name,
self.controller.index,
self.req)
@@ -287,11 +195,16 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- req)
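+        # When scope enforcement is on, the all-tenants listing is
+        # gated differently per token: base.rule_if_system (assumed
+        # helper in the policies base test module) selects which of the
+        # two rules to enforce depending on whether the context is
+        # system scoped, and functools.partial binds the rule pair so
+        # common_policy_auth can evaluate it per-context.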
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ check_rule,
+ self.controller.index,
+ req)
@mock.patch('nova.compute.api.API.get_all')
def test_detail_list_server_policy(self, mock_get):
@@ -309,9 +222,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
rule_name = policies.SERVERS % 'detail'
- self.common_policy_check(
+ self.common_policy_auth(
self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
rule_name,
self.controller.detail,
self.req)
@@ -335,11 +247,16 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.detail,
- req)
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ check_rule,
+ self.controller.detail,
+ req)
def test_index_server_allow_all_filters_policy(self):
# 'index' policy is checked before 'allow_all_filters' so
@@ -353,9 +270,9 @@ class ServersPolicyTest(base.BasePolicyTest):
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
- if context in self.system_reader_unauthorized_contexts:
+ if context not in self.all_projects_admin_authorized_contexts:
self.assertNotIn('host', search_opts)
- if context in self.system_reader_authorized_contexts:
+ if context in self.all_projects_admin_authorized_contexts:
self.assertIn('host', search_opts)
return objects.InstanceList(objects=self.servers)
@@ -363,9 +280,8 @@ class ServersPolicyTest(base.BasePolicyTest):
req = fakes.HTTPRequest.blank('/servers?host=1')
rule_name = policies.SERVERS % 'allow_all_filters'
- self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name,
self.controller.index,
req, fatal=False)
@@ -382,18 +298,17 @@ class ServersPolicyTest(base.BasePolicyTest):
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
- if context in self.system_reader_unauthorized_contexts:
+ if context not in self.all_projects_admin_authorized_contexts:
self.assertNotIn('host', search_opts)
- if context in self.system_reader_authorized_contexts:
+ if context in self.all_projects_admin_authorized_contexts:
self.assertIn('host', search_opts)
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
req = fakes.HTTPRequest.blank('/servers?host=1')
rule_name = policies.SERVERS % 'allow_all_filters'
- self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name,
self.controller.detail,
req, fatal=False)
@@ -401,22 +316,117 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
def test_show_server_policy(self, mock_bdm):
rule_name = policies.SERVERS % 'show'
- self.common_policy_check(
- self.system_reader_or_owner_authorized_contexts,
- self.system_reader_or_owner_unauthorized_contexts,
+ # Show includes readers
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name,
self.controller.show,
self.req, self.instance.uuid)
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
+ rule = policies.SERVERS % 'show'
+ # server 'show' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_reader_authorized_contexts,
+ rule_name, self.controller.show, req,
+ self.instance.uuid, fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['server']['flavor'])
+
+ @mock.patch('nova.compute.api.API.get_all')
+ def test_server_detail_with_extra_specs_policy(self, mock_get):
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ if 'project_id' in search_opts or 'user_id' in search_opts:
+ return objects.InstanceList(objects=self.servers)
+ else:
+ raise
+
+ self.mock_get_all.side_effect = fake_get_all
+ rule = policies.SERVERS % 'detail'
+ # server 'detail' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.everyone_authorized_contexts,
+ rule_name, self.controller.detail, req,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['servers'][0]['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
+ mock_get, mock_bdm):
+ rule = policies.SERVERS % 'rebuild'
+ # server 'rebuild' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_reader_authorized_contexts,
+ rule_name, self.controller._action_rebuild,
+ req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp.obj['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_server_update_with_extra_specs_policy(self,
+ mock_update, mock_group, mock_bdm):
+ mock_update.return_value = self.instance
+ rule = policies.SERVERS % 'update'
+ # server 'update' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_reader_authorized_contexts,
+ rule_name, self.controller.update,
+ req, self.instance.uuid,
+ body={'server': {'name': 'test'}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['server']['flavor'])
+
@mock.patch('nova.compute.api.API.create')
def test_create_server_policy(self, mock_create):
mock_create.return_value = ([self.instance], '')
rule_name = policies.SERVERS % 'create'
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=self.body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=self.body)
@mock.patch('nova.compute.api.API.create')
@mock.patch('nova.compute.api.API.parse_availability_zone')
@@ -431,11 +441,10 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
mock_create.return_value = ([self.instance], '')
mock_az.return_value = ('test', 'host', None)
- self.common_policy_check(self.project_admin_authorized_contexts,
- self.project_admin_unauthorized_contexts,
- self.rule_forced_host,
- self.controller.create,
- self.req, body=self.body)
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ self.rule_forced_host,
+ self.controller.create,
+ self.req, body=self.body)
@mock.patch('nova.compute.api.API.create')
def test_create_attach_volume_server_policy(self, mock_create):
@@ -453,11 +462,10 @@ class ServersPolicyTest(base.BasePolicyTest):
'block_device_mapping': [{'device_name': 'foo'}],
},
}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- self.rule_attach_volume,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ self.rule_attach_volume,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.API.create')
def test_create_attach_network_server_policy(self, mock_create):
@@ -477,11 +485,10 @@ class ServersPolicyTest(base.BasePolicyTest):
}],
},
}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- self.rule_attach_network,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ self.rule_attach_network,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.API.create')
def test_create_trusted_certs_server_policy(self, mock_create):
@@ -504,20 +511,18 @@ class ServersPolicyTest(base.BasePolicyTest):
},
}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- self.rule_trusted_certs,
- self.controller.create,
- req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ self.rule_trusted_certs,
+ self.controller.create,
+ req, body=body)
@mock.patch('nova.compute.api.API.delete')
def test_delete_server_policy(self, mock_delete):
rule_name = policies.SERVERS % 'delete'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid)
def test_delete_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -547,11 +552,10 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_name = policies.SERVERS % 'update'
body = {'server': {'name': 'test'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.instance.uuid, body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, body=body)
def test_update_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -582,44 +586,40 @@ class ServersPolicyTest(base.BasePolicyTest):
def test_confirm_resize_server_policy(self, mock_confirm_resize):
rule_name = policies.SERVERS % 'confirm_resize'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_confirm_resize,
- self.req, self.instance.uuid,
- body={'confirmResize': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_confirm_resize,
+ self.req, self.instance.uuid,
+ body={'confirmResize': 'null'})
@mock.patch('nova.compute.api.API.revert_resize')
def test_revert_resize_server_policy(self, mock_revert_resize):
rule_name = policies.SERVERS % 'revert_resize'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_revert_resize,
- self.req, self.instance.uuid,
- body={'revertResize': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_revert_resize,
+ self.req, self.instance.uuid,
+ body={'revertResize': 'null'})
@mock.patch('nova.compute.api.API.reboot')
def test_reboot_server_policy(self, mock_reboot):
rule_name = policies.SERVERS % 'reboot'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_reboot,
- self.req, self.instance.uuid,
- body={'reboot': {'type': 'soft'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_reboot,
+ self.req, self.instance.uuid,
+ body={'reboot': {'type': 'soft'}})
@mock.patch('nova.compute.api.API.resize')
def test_resize_server_policy(self, mock_resize):
rule_name = policies.SERVERS % 'resize'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_resize,
- self.req, self.instance.uuid,
- body={'resize': {'flavorRef': 'f1'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_resize,
+ self.req, self.instance.uuid,
+ body={'resize': {'flavorRef': 'f1'}})
def test_resize_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -650,23 +650,21 @@ class ServersPolicyTest(base.BasePolicyTest):
def test_start_server_policy(self, mock_start):
rule_name = policies.SERVERS % 'start'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._start_server,
- self.req, self.instance.uuid,
- body={'os-start': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._start_server,
+ self.req, self.instance.uuid,
+ body={'os-start': 'null'})
@mock.patch('nova.compute.api.API.stop')
def test_stop_server_policy(self, mock_stop):
rule_name = policies.SERVERS % 'stop'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._stop_server,
- self.req, self.instance.uuid,
- body={'os-stop': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._stop_server,
+ self.req, self.instance.uuid,
+ body={'os-stop': 'null'})
def test_stop_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -696,12 +694,11 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API.rebuild')
def test_rebuild_server_policy(self, mock_rebuild):
rule_name = policies.SERVERS % 'rebuild'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_rebuild,
- self.req, self.instance.uuid,
- body={'rebuild': {"imageRef": uuids.fake_id}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_rebuild,
+ self.req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}})
def test_rebuild_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -743,11 +740,17 @@ class ServersPolicyTest(base.BasePolicyTest):
'trusted_image_certificates': [uuids.fake_id],
},
}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_rebuild,
- req, self.instance.uuid, body=body)
+
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ check_rule,
+ self.controller._action_rebuild,
+ req, self.instance.uuid, body=body)
def test_rebuild_trusted_certs_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -796,12 +799,11 @@ class ServersPolicyTest(base.BasePolicyTest):
def test_create_image_server_policy(self, mock_snapshot, mock_image,
mock_bdm):
rule_name = policies.SERVERS % 'create_image'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_create_image,
- self.req, self.instance.uuid,
- body={'createImage': {"name": 'test'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_create_image,
+ self.req, self.instance.uuid,
+ body={'createImage': {"name": 'test'}})
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.image.glance.API.generate_image_url')
@@ -816,23 +818,26 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.SERVERS % 'create_image:allow_volume_backed'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_create_image,
- self.req, self.instance.uuid,
- body={'createImage': {"name": 'test'}})
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ check_rule,
+ self.controller._action_create_image,
+ self.req, self.instance.uuid,
+ body={'createImage': {"name": 'test'}})
@mock.patch('nova.compute.api.API.trigger_crash_dump')
def test_trigger_crash_dump_server_policy(self, mock_crash):
rule_name = policies.SERVERS % 'trigger_crash_dump'
req = fakes.HTTPRequest.blank('', version='2.17')
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_trigger_crash_dump,
- req, self.instance.uuid,
- body={'trigger_crash_dump': None})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_trigger_crash_dump,
+ req, self.instance.uuid,
+ body={'trigger_crash_dump': None})
def test_trigger_crash_dump_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -876,9 +881,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.3')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for attr in self.extended_attr:
@@ -897,9 +901,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.3')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for attr in self.extended_attr:
@@ -920,9 +923,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
@@ -940,8 +942,11 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_update_with_extended_attr_policy(self,
- mock_update, mock_group, mock_bdm):
+ mock_status, mock_update, mock_group, mock_bdm):
+ mock_update.return_value = self.instance
+ mock_status.return_value = fields.HostStatus.UP
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before extended attributes
# policy so we have to allow it for everyone otherwise it will fail
@@ -949,9 +954,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
@@ -977,9 +981,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
@@ -998,9 +1001,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for resp in authorize_res:
@@ -1020,9 +1022,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
@@ -1035,8 +1036,11 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_update_with_host_status_policy(self,
- mock_update, mock_group, mock_bdm):
+ mock_status, mock_update, mock_group, mock_bdm):
+ mock_update.return_value = self.instance
+ mock_status.return_value = fields.HostStatus.UP
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before host_status
# policy so we have to allow it for everyone otherwise it will fail
@@ -1044,9 +1048,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
@@ -1079,9 +1082,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
@@ -1107,9 +1109,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for resp in authorize_res:
@@ -1136,9 +1137,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
@@ -1156,6 +1156,7 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API.update_instance')
def test_server_update_with_unknown_host_status_policy(self,
mock_update, mock_group, mock_status, mock_bdm):
+ mock_update.return_value = self.instance
mock_status.return_value = fields.HostStatus.UNKNOWN
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before unknown host_status
@@ -1168,9 +1169,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
@@ -1194,9 +1194,9 @@ class ServersPolicyTest(base.BasePolicyTest):
def fake_create(context, *args, **kwargs):
for attr in ['requested_host', 'requested_hypervisor_hostname']:
- if context in self.project_admin_authorized_contexts:
+ if context in self.all_projects_admin_authorized_contexts:
self.assertIn(attr, kwargs)
- if context in self.project_admin_unauthorized_contexts:
+ if context not in self.all_projects_admin_authorized_contexts:
self.assertNotIn(attr, kwargs)
return ([self.instance], '')
mock_create.side_effect = fake_create
@@ -1214,11 +1214,10 @@ class ServersPolicyTest(base.BasePolicyTest):
},
}
- self.common_policy_check(self.project_admin_authorized_contexts,
- self.project_admin_unauthorized_contexts,
- self.rule_requested_destination,
- self.controller.create,
- req, body=body)
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ self.rule_requested_destination,
+ self.controller.create,
+ req, body=body)
@mock.patch(
'nova.servicegroup.api.API.service_is_up',
@@ -1230,10 +1229,9 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API._allow_resize_to_same_host')
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(
- self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net
+ self, mock_resize, mock_save, mock_rs, mock_allow, m_net
):
# 'migrate' policy is checked before 'resize:cross_cell' so
@@ -1263,13 +1261,13 @@ class ServersPolicyTest(base.BasePolicyTest):
)
return inst
- mock_get.side_effect = fake_get
+ self.mock_get.side_effect = fake_get
def fake_validate(context, instance,
host_name, allow_cross_cell_resize):
if context in self.cross_cell_authorized_contexts:
self.assertTrue(allow_cross_cell_resize)
- if context in self.cross_cell_unauthorized_contexts:
+ if context not in self.cross_cell_authorized_contexts:
self.assertFalse(allow_cross_cell_resize)
return objects.ComputeNode(host=1, hypervisor_hostname=2)
@@ -1277,23 +1275,24 @@ class ServersPolicyTest(base.BasePolicyTest):
'nova.compute.api.API._validate_host_for_cold_migrate',
fake_validate)
- self.common_policy_check(self.cross_cell_authorized_contexts,
- self.cross_cell_unauthorized_contexts,
- rule_name,
- self.m_controller._migrate,
- req, self.instance.uuid,
- body={'migrate': {'host': 'fake'}},
- fatal=False)
+ self.common_policy_auth(self.cross_cell_authorized_contexts,
+ rule_name,
+ self.m_controller._migrate,
+ req, self.instance.uuid,
+ body={'migrate': {'host': 'fake'}},
+ fatal=False)
def test_network_attach_external_network_policy(self):
# NOTE(gmann): Testing policy 'network:attach_external_network'
# which raise different error then PolicyNotAuthorized
# if not allowed.
neutron_api = neutron.API()
- for context in self.zero_disk_external_net_authorized_contexts:
+ for context in self.all_projects_admin_authorized_contexts:
neutron_api._check_external_network_attach(context,
[{'id': 1, 'router:external': 'ext'}])
- for context in self.zero_disk_external_net_unauthorized_contexts:
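+        # The unauthorized set is no longer hand-maintained; it is
+        # computed as the complement of the authorized set over all
+        # contexts.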
+ unauth = (set(self.all_contexts) -
+ set(self.all_projects_admin_authorized_contexts))
+ for context in unauth:
self.assertRaises(exception.ExternalNetworkAttachForbidden,
neutron_api._check_external_network_attach,
context, [{'id': 1, 'router:external': 'ext'}])
@@ -1306,16 +1305,63 @@ class ServersPolicyTest(base.BasePolicyTest):
flavor = objects.Flavor(
vcpus=1, memory_mb=512, root_gb=0, extra_specs={'hw:pmu': "true"})
compute_api = compute.API()
- for context in self.zero_disk_external_net_authorized_contexts:
+ for context in self.all_projects_admin_authorized_contexts:
compute_api._validate_flavor_image_nostatus(context,
image, flavor, None)
- for context in self.zero_disk_external_net_unauthorized_contexts:
+ unauth = (set(self.all_contexts) -
+ set(self.all_projects_admin_authorized_contexts))
+ for context in unauth:
self.assertRaises(
exception.BootFromVolumeRequiredForZeroDiskFlavor,
compute_api._validate_flavor_image_nostatus,
context, image, flavor, None)
+class ServersNoLegacyNoScopeTest(ServersPolicyTest):
+ """Test Servers API policies with deprecated rules disabled, but scope
+ checking still disabled.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.SERVERS % 'show:flavor-extra-specs':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
+
+ def setUp(self):
+ super(ServersNoLegacyNoScopeTest, self).setUp()
+
+ # Disabling legacy rule support means that we no longer allow
+ # random roles on our project to take action on our
+ # resources. Legacy admin will have access.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+ # The only additional role that can read our resources is our
+ # own project_reader.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+ # Disabling legacy support means random roles lose power to
+ # see everything in their project.
+ self.reduce_set('everyone_authorized',
+ self.all_contexts - set([self.project_foo_context,
+ self.system_foo_context]))
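+        # (reduce_set is assumed to be a BasePolicyTest helper that
+        # shrinks the named '<name>_contexts' set to the given subset.)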
+
+ # Disabling legacy support means readers and random roles lose
+ # power to create things on their own projects. Note that
+ # system_admin and system_member are still here because we are
+ # not rejecting them by scope, even though these operations
+ # with those tokens are likely to fail because they have no
+ # project.
+ self.reduce_set('project_member_authorized',
+ self.all_contexts - set([
+ self.system_reader_context,
+ self.system_foo_context,
+ self.project_reader_context,
+ self.project_foo_context,
+ self.other_project_reader_context]))
+
+
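
The reduce_set() calls above shrink named context sets defined by the
parent class. A rough sketch of the idea, assuming the base class stores
each set under a '<name>_contexts' attribute (the naming convention is an
assumption):

    def reduce_set(self, name, new_set):
        # Hypothetical sketch: a subclass may only narrow an inherited
        # set, never widen it, so guard against accidentally granting
        # access to contexts the parent never authorized.
        current = set(getattr(self, name + '_contexts'))
        assert new_set <= current, 'reduce_set() can only remove contexts'
        setattr(self, name + '_contexts', new_set)
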
class ServersScopeTypePolicyTest(ServersPolicyTest):
"""Test Servers APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -1342,143 +1388,77 @@ class ServersScopeTypePolicyTest(ServersPolicyTest):
self.rule_requested_destination = None
self.rule_forced_host = None
- # Check that system admin is able to create server with host request
- # and get server extended attributes or host status.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non-system/admin is not able to create server with
- # host request and get server extended attributes or host status.
- self.admin_unauthorized_contexts = [
- self.project_admin_context, self.legacy_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system reader is able to list the server
- # for all projects.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to list the server
- # for all projects.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
-
- # Check if project member can create the server.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_reader_context
- ]
- # Check if non-project member cannot create the server.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context
- ]
-
- # Check that project admin is able to create server with requested
- # destination.
- self.project_admin_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context]
- # Check that non-project admin is not able to create server with
- # requested destination
- self.project_admin_unauthorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # With scope checking enabled, system admins no longer have
+ # admin-granted project resource access.
+ self.reduce_set('project_action_authorized',
+ set([self.legacy_admin_context,
+ self.project_admin_context,
+ self.project_member_context,
+ self.project_reader_context,
+ self.project_foo_context]))
+
+ # No change from the base behavior here, but we need to
+ # re-build this from project_action_authorized, since we
+ # changed it above.
+ self.project_reader_authorized_contexts = (
+ self.project_action_authorized_contexts)
+
+ # With scope checking enabled, system users no longer have
+ # project access, even to create their own resources.
+ self.reduce_set('project_member_authorized', self.all_project_contexts)
+
+ # With scope checking enabled, system admin is no longer an
+ # admin of project resources.
+ self.reduce_set('project_admin_authorized',
+ set([self.legacy_admin_context,
+ self.project_admin_context]))
+ self.reduce_set('all_projects_admin_authorized',
+ set([self.legacy_admin_context,
+ self.project_admin_context]))
+
+ # With scope checking enabled, system users also lose access to read
+ # project resources.
+ self.reduce_set('everyone_authorized',
+ self.all_contexts - self.all_system_contexts)
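
The set arithmetic above relies on the base class partitioning every
request context by scope. A runnable sketch of that partition, with string
labels standing in for the context objects (membership is inferred from
the names used in these tests):

    # Hypothetical sketch of the context partitions used above.
    all_project_contexts = {
        'legacy_admin', 'project_admin', 'project_member',
        'project_reader', 'project_foo',
        'other_project_member', 'other_project_reader',
    }
    all_system_contexts = {
        'system_admin', 'system_member', 'system_reader', 'system_foo',
    }
    # Everything a policy check can see is the union of the two scopes.
    all_contexts = all_project_contexts | all_system_contexts
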
class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
"""Test Servers APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.SERVERS % 'show:flavor-extra-specs':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
def setUp(self):
super(ServersNoLegacyPolicyTest, self).setUp()
- # Check that system admin or owner is able to update, delete
- # or perform server action.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system and non-admin/owner is not able to update,
- # delete or perform server action.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context]
-
- # Check that system reader or projct owner is able to get
- # server.
- self.system_reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to get
- # server.
- self.system_reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- # Check if project member can create the server.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context,
- self.other_project_member_context
- ]
- # Check if non-project member cannot create the server.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.project_reader_context,
- self.project_foo_context, self.other_project_reader_context,
- self.system_reader_context, self.system_foo_context
- ]
- # Check that system admin is able to get server extended attributes
- # or host status.
- self.server_attr_admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system admin is not able to get server extended
- # attributes or host status.
- self.server_attr_admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Disabling legacy support means legacy_admin is no longer
+ # powerful on our project. Also, we drop the "any role on the
+ # project means you can do stuff" behavior, so project_reader
+ # and project_foo lose power.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+
+ # Only project_reader has additional read access to our
+ # project resources.
+ self.project_reader_authorized_contexts = (
+ self.project_action_authorized_contexts |
+ set([self.project_reader_context]))
+
+ # Disabling legacy support means random roles lose power to
+ # see everything in their project.
+ self.reduce_set(
+ 'everyone_authorized',
+ self.all_project_contexts - set([self.project_foo_context]))
+
+ # Disabling legacy support means readers and random roles lose
+ # power to create things on their own projects.
+ self.reduce_set('project_member_authorized',
+ self.all_project_contexts - set([
+ self.project_foo_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ ]))
diff --git a/nova/tests/unit/policies/test_services.py b/nova/tests/unit/policies/test_services.py
index cdca5ebc7f..72465eb748 100644
--- a/nova/tests/unit/policies/test_services.py
+++ b/nova/tests/unit/policies/test_services.py
@@ -11,12 +11,9 @@
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import services as services_v21
-from nova import exception
-from nova.policies import base as base_policy
-from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -34,64 +31,36 @@ class ServicesPolicyTest(base.BasePolicyTest):
super(ServicesPolicyTest, self).setUp()
self.controller = services_v21.ServiceController()
self.req = fakes.HTTPRequest.blank('/services')
- # Check that admin is able to change the service
- self.admin_authorized_contexts = [
+
+        # With legacy rules enabled and scope checks disabled by default,
+        # system admin, legacy admin, and project admin will be able to
+        # perform Services Operations.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to change the service
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system scoped admin, member and reader are able to
- # read the service data.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to read the service data. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
- self.project_admin_context]
- # Check that non-system-reader are not able to read the service
- # data
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
def test_delete_service_policy(self):
rule_name = "os_compute_api:os-services:delete"
with mock.patch('nova.compute.api.HostAPI.service_get_by_id'):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, 1)
def test_index_service_policy(self):
rule_name = "os_compute_api:os-services:list"
with mock.patch('nova.compute.api.HostAPI.service_get_all'):
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_old_update_service_policy(self):
rule_name = "os_compute_api:os-services:update"
body = {'host': 'host1', 'binary': 'nova-compute'}
update = 'nova.compute.api.HostAPI.service_update_by_host_and_binary'
with mock.patch(update):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, 'enable', body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, 'enable', body=body)
def test_update_service_policy(self):
rule_name = "os_compute_api:os-services:update"
@@ -100,11 +69,25 @@ class ServicesPolicyTest(base.BasePolicyTest):
service = self.start_service(
'compute', 'fake-compute-host').service_ref
with mock.patch('nova.compute.api.HostAPI.service_update'):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- req, service.uuid,
- body={'status': 'enabled'})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ req, service.uuid,
+ body={'status': 'enabled'})
+
+
+class ServicesNoLegacyNoScopePolicyTest(ServicesPolicyTest):
+    """Test Services APIs policies with no legacy deprecated rules
+    and no scope checks, which means new defaults only. In this case
+    system admin, legacy admin, and project admin will be able to
+    perform Service Operations. Legacy admin is still allowed because
+    the policy is simply 'admin' when scope checks are disabled.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServicesNoLegacyNoScopePolicyTest, self).setUp()
class ServicesScopeTypePolicyTest(ServicesPolicyTest):
@@ -122,80 +105,16 @@ class ServicesScopeTypePolicyTest(ServicesPolicyTest):
super(ServicesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to change the service
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to change the service
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system admin, member and reader are able to read the
- # service data
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system or non-reader are not able to read the service
- # data
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class ServicesDeprecatedPolicyTest(base.BasePolicyTest):
- """Test os-services APIs Deprecated policies.
-
- This class checks if deprecated policy rules are
- overridden by user on policy.yaml file then they
- still work because oslo.policy add deprecated rules
- in logical OR condition and enforce them for policy
- checks if overridden.
+        # With scope checks enabled, only legacy admin and project admin
+        # are able to perform Service Operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class ServicesScopeTypeNoLegacyPolicyTest(ServicesScopeTypePolicyTest):
+ """Test Services APIs policies with no legacy deprecated rules
+    and scope checks enabled, which means scope + new defaults, so
+    only project admins are able to perform Services Operations.
"""
- def setUp(self):
- super(ServicesDeprecatedPolicyTest, self).setUp()
- self.controller = services_v21.ServiceController()
- self.member_req = fakes.HTTPRequest.blank('')
- self.member_req.environ['nova.context'] = self.system_reader_context
- self.reader_req = fakes.HTTPRequest.blank('')
- self.reader_req.environ['nova.context'] = self.project_reader_context
- self.deprecated_policy = "os_compute_api:os-services"
- # Overridde rule with different checks than defaults so that we can
- # verify the rule overridden case.
- override_rules = {self.deprecated_policy: base_policy.SYSTEM_READER}
- # NOTE(gmann): Only override the deprecated rule in policy file so
- # that
- # we can verify if overridden checks are considered by oslo.policy.
- # Oslo.policy will consider the overridden rules if:
- # 1. overridden deprecated rule's checks are different than defaults
- # 2. new rules are not present in policy file
- self.policy = self.useFixture(fixtures.OverridePolicyFixture(
- rules_in_file=override_rules))
-
- def test_deprecated_policy_overridden_rule_is_checked(self):
- # Test to verify if deprecatd overridden policy is working.
-
- # check for success as member role. Deprecated rule
- # has been overridden with member checks in policy.yaml
- # If member role pass it means overridden rule is enforced by
- # olso.policy because new default is system admin and the old
- # default is admin.
- with mock.patch('nova.compute.api.HostAPI.service_get_by_id'):
- self.controller.index(self.member_req)
-
- # check for failure with reader context.
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, self.reader_req)
- self.assertEqual(
- "Policy doesn't allow os_compute_api:os-services:list to be"
- " performed.",
- exc.format_message())
+ without_deprecated_rules = True
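
The deleted ServicesDeprecatedPolicyTest exercised oslo.policy's behavior
of OR-ing an overridden deprecated rule with the new default during the
deprecation window. Roughly, registering a default like the following
(names and check strings are illustrative, not the exact nova
definitions) lets a token pass if it satisfies either the old or the new
check:

    from oslo_policy import policy

    deprecated = policy.DeprecatedRule(
        name='os_compute_api:os-services',
        check_str='rule:admin_api',
        deprecated_reason='replaced by granular service rules',
        deprecated_since='21.0.0')

    rule = policy.DocumentedRuleDefault(
        name='os_compute_api:os-services:list',
        check_str='rule:admin_api',
        description='List all running Compute services.',
        operations=[{'path': '/os-services', 'method': 'GET'}],
        # Until the deprecated rule is removed, oslo.policy enforces
        # "new check OR old (possibly overridden) check".
        deprecated_rule=deprecated)
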
diff --git a/nova/tests/unit/policies/test_shelve.py b/nova/tests/unit/policies/test_shelve.py
index c4cf3dedbb..052f844c3d 100644
--- a/nova/tests/unit/policies/test_shelve.py
+++ b/nova/tests/unit/policies/test_shelve.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import shelve
@@ -43,63 +44,48 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
id=1, uuid=uuids.fake_id, project_id=self.project_id,
user_id=user_id, vm_state=vm_states.ACTIVE)
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to shelve/unshelve
- # the server
- self.admin_or_owner_authorized_contexts = [
+        # With legacy rules and no scope checks, all admins, project
+        # members, project readers, or any other project role (because the
+        # legacy rule allows the server owner, i.e. same project id with
+        # no role check) are able to shelve/unshelve the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to shelve/unshelve
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that admin is able to shelve offload the server.
- self.admin_authorized_contexts = [
+
+        # By default, legacy rules are enabled and scope checks are
+        # disabled, so system admin, legacy admin, and project admin are
+        # able to shelve offload the server.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to shelve offload the server.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.shelve')
def test_shelve_server_policy(self, mock_shelve):
rule_name = policies.POLICY_ROOT % 'shelve'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._shelve,
- self.req, self.instance.uuid,
- body={'shelve': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._shelve,
+ self.req, self.instance.uuid,
+ body={'shelve': {}})
@mock.patch('nova.compute.api.API.unshelve')
def test_unshelve_server_policy(self, mock_unshelve):
rule_name = policies.POLICY_ROOT % 'unshelve'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unshelve,
- self.req, self.instance.uuid,
- body={'unshelve': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unshelve,
+ self.req, self.instance.uuid,
+ body={'unshelve': {}})
@mock.patch('nova.compute.api.API.shelve_offload')
def test_shelve_offload_server_policy(self, mock_offload):
rule_name = policies.POLICY_ROOT % 'shelve_offload'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._shelve_offload,
- self.req, self.instance.uuid,
- body={'shelveOffload': {}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller._shelve_offload,
+ self.req, self.instance.uuid,
+ body={'shelveOffload': {}})
def test_shelve_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -115,7 +101,7 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.shelve')
- def test_shelve_sevrer_overridden_policy_pass_with_same_user(
+ def test_shelve_server_overridden_policy_pass_with_same_user(
self, mock_shelve):
rule_name = policies.POLICY_ROOT % 'shelve'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -124,6 +110,22 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
body={'shelve': {}})
+class ShelveServerNoLegacyNoScopePolicyTest(ShelveServerPolicyTest):
+ """Test shelve/unshelve server APIs policies with no legacy deprecated
+ rules and no scope checks which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ShelveServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to shelve/unshelve the server and only project admin can
+ # shelve offload the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
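The project_member_or_admin_with_no_scope_no_legacy set (and its
scope/no-scope siblings used throughout these tests) is presumably
precomputed once on the base class. A sketch of the intent, again with
string labels standing in for contexts (exact membership is an
assumption):

    # Hypothetical sketch: with legacy rules disabled but scope checks
    # still off, admins from any scope plus the project's own members
    # keep write access; readers and bare project roles drop out.
    project_member_or_admin_with_no_scope_no_legacy = {
        'legacy_admin', 'system_admin', 'project_admin', 'project_member',
    }
    # The reader variant only adds read access for the project reader.
    project_reader_or_admin_with_no_scope_no_legacy = (
        project_member_or_admin_with_no_scope_no_legacy
        | {'project_reader'})
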
class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
"""Test Shelve Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -137,41 +139,23 @@ class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
def setUp(self):
super(ShelveServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+        # Enabling scope checks will not allow system admin to
+        # shelve/unshelve the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ShelveServerNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
+class ShelveServerScopeTypeNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
"""Test Shelve Server APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(ShelveServerNoLegacyPolicyTest, self).setUp()
-
- # Check that system admin or and owner is able to shelve/unshelve
- # the server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to shelve/unshelve
- # the server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
- # Check that system admin is able to shelve offload the server.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non system admin is not able to shelve offload the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(ShelveServerScopeTypeNoLegacyPolicyTest, self).setUp()
+        # With scope checks enabled and no legacy rules, only project
+        # admin/member will be able to shelve/unshelve the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_simple_tenant_usage.py b/nova/tests/unit/policies/test_simple_tenant_usage.py
index 60eecdece8..d6aa7af901 100644
--- a/nova/tests/unit/policies/test_simple_tenant_usage.py
+++ b/nova/tests/unit/policies/test_simple_tenant_usage.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import simple_tenant_usage
from nova.policies import simple_tenant_usage as policies
@@ -32,47 +32,46 @@ class SimpleTenantUsagePolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
self.controller._get_instances_all_cells = mock.MagicMock()
- # Check that reader(legacy admin) or and owner is able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_authorized_contexts = [
+        # Currently, any admin can list other projects' usage.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+        # Project readers can get their own usage statistics.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-reader(legacy non-admin) or owner is not able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that reader is able to get the tenant usage statistics.
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-reader is not able to get the tenant usage statistics.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
]
def test_index_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
def test_show_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.project_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.project_id)
+
+
+class SimpleTenantUsageNoLegacyNoScopePolicyTest(SimpleTenantUsagePolicyTest):
+ """Test Simple Tenant Usage APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(SimpleTenantUsageNoLegacyNoScopePolicyTest, self).setUp()
+        # With no legacy rules, other project roles like foo will not be
+        # able to get tenant usage.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
@@ -88,23 +87,14 @@ class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
def setUp(self):
super(SimpleTenantUsageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system reader is able to get the tenant usage statistics.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system/reader is not able to get the tenant usage
- # statistics.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
+        # With scope checks enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class SimpleTenantUsageNoLegacyPolicyTest(
+class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
SimpleTenantUsageScopeTypePolicyTest):
"""Test Simple Tenant Usage APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
@@ -113,17 +103,6 @@ class SimpleTenantUsageNoLegacyPolicyTest(
without_deprecated_rules = True
def setUp(self):
- super(SimpleTenantUsageNoLegacyPolicyTest, self).setUp()
- # Check that system reader or owner is able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
- # Check that non-system reader/owner is not able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.other_project_reader_context
- ]
+ super(SimpleTenantUsageScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_suspend_server.py b/nova/tests/unit/policies/test_suspend_server.py
index ecf0ebb9ab..7d3cde2799 100644
--- a/nova/tests/unit/policies/test_suspend_server.py
+++ b/nova/tests/unit/policies/test_suspend_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import suspend_server
@@ -44,40 +45,32 @@ class SuspendServerPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE)
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to suspend/resume
- # the server
- self.admin_or_owner_authorized_contexts = [
+        # With legacy rules and no scope checks, all admins, project
+        # members, project readers, or any other project role (because the
+        # legacy rule allows the server owner, i.e. same project id with
+        # no role check) are able to suspend/resume the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to suspend/resume
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.suspend')
def test_suspend_server_policy(self, mock_suspend):
rule_name = policies.POLICY_ROOT % 'suspend'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._suspend,
- self.req, self.instance.uuid,
- body={'suspend': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._suspend,
+ self.req, self.instance.uuid,
+ body={'suspend': {}})
@mock.patch('nova.compute.api.API.resume')
def test_resume_server_policy(self, mock_resume):
rule_name = policies.POLICY_ROOT % 'resume'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._resume,
- self.req, self.instance.uuid,
- body={'resume': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._resume,
+ self.req, self.instance.uuid,
+ body={'resume': {}})
def test_suspend_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -102,6 +95,22 @@ class SuspendServerPolicyTest(base.BasePolicyTest):
body={'suspend': {}})
+class SuspendServerNoLegacyNoScopePolicyTest(SuspendServerPolicyTest):
+ """Test suspend server APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(SuspendServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to suspend/resume the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
"""Test Suspend Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -115,28 +124,22 @@ class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
def setUp(self):
super(SuspendServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+        # Enabling scope checks will not allow system admin to
+        # suspend/resume the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class SuspendServerNoLegacyPolicyTest(SuspendServerScopeTypePolicyTest):
- """Test Suspend Server APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
+ """Test suspend/resume server APIs policies with system scope enabled,
+    and no more deprecated rules, which means scope + new defaults, so
+    only project admin and member are able to suspend/resume the server.
"""
+
without_deprecated_rules = True
def setUp(self):
- super(SuspendServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to
- # suspend/resume the server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to suspend/resume
- # the server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(SuspendServerScopeTypeNoLegacyTest, self).setUp()
+        # With scope checks enabled and no legacy rules, only project
+        # admin/member will be able to suspend/resume the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_tenant_networks.py b/nova/tests/unit/policies/test_tenant_networks.py
index 12e8731582..a5bc614902 100644
--- a/nova/tests/unit/policies/test_tenant_networks.py
+++ b/nova/tests/unit/policies/test_tenant_networks.py
@@ -10,10 +10,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import tenant_networks
+from nova.policies import base as base_policy
+from nova.policies import tenant_networks as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -36,7 +39,7 @@ class TenantNetworksPolicyTest(base.BasePolicyTest):
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of networks then neutron will be returning the appropriate error.
- self.everyone_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -45,23 +48,46 @@ class TenantNetworksPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.everyone_unauthorized_contexts = []
@mock.patch('nova.network.neutron.API.get_all')
def test_list_tenant_networks_policy(self, mock_get):
- rule_name = "os_compute_api:os-tenant-networks"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ rule_name = "os_compute_api:os-tenant-networks:list"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.network.neutron.API.get')
def test_show_tenant_network_policy(self, mock_get):
- rule_name = "os_compute_api:os-tenant-networks"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-tenant-networks:show"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
+
+
+class TenantNetworksNoLegacyNoScopePolicyTest(TenantNetworksPolicyTest):
+ """Test Tenant Networks APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN}
+
+ def setUp(self):
+ super(TenantNetworksNoLegacyNoScopePolicyTest, self).setUp()
+        # With no legacy rules, other project roles like foo will not be
+        # able to get tenant networks.
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
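
The rules_without_deprecation override above works because the old
monolithic "os_compute_api:os-tenant-networks" rule was split into
granular ':list' and ':show' rules. A small sketch of how the tests'
rule names expand (POLICY_NAME matches the pattern used by
nova.policies.tenant_networks; the check string constant here is
illustrative):

    POLICY_NAME = 'os_compute_api:os-tenant-networks:%s'
    PROJECT_READER_OR_ADMIN = 'rule:project_reader_or_admin'

    # Both granular rules get the same new default check.
    rules = {POLICY_NAME % op: PROJECT_READER_OR_ADMIN
             for op in ('list', 'show')}
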
class TenantNetworksScopeTypePolicyTest(TenantNetworksPolicyTest):
@@ -78,3 +104,31 @@ class TenantNetworksScopeTypePolicyTest(TenantNetworksPolicyTest):
def setUp(self):
super(TenantNetworksScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+
+
+class TenantNetworksScopeTypeNoLegacyPolicyTest(
+ TenantNetworksScopeTypePolicyTest):
+ """Test Tenant Networks APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN}
+
+ def setUp(self):
+ super(TenantNetworksScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.other_project_member_context,
+ self.other_project_reader_context,
+ ]
diff --git a/nova/tests/unit/policies/test_volumes.py b/nova/tests/unit/policies/test_volumes.py
index 4ee33d0694..896881c03f 100644
--- a/nova/tests/unit/policies/test_volumes.py
+++ b/nova/tests/unit/policies/test_volumes.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -20,6 +21,8 @@ from nova.compute import vm_states
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
+from nova.policies import base as base_policy
+from nova.policies import volumes as v_policies
from nova.policies import volumes_attachments as va_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
@@ -92,77 +95,50 @@ class VolumeAttachPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to list/create/show/delete
- # the attached volume.
- self.admin_or_owner_authorized_contexts = [
+        # With legacy rules and no scope checks, all admins, project
+        # members, project readers, or any other project role (because the
+        # legacy rule allows the resource owner, i.e. same project id with
+        # no role check) are able to create/delete/update the volume
+        # attachment.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_foo_context,
- self.project_reader_context, self.project_member_context
- ]
-
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that admin is able to update the attached volume
- self.admin_authorized_contexts = [
- self.legacy_admin_context,
- self.system_admin_context,
- self.project_admin_context
- ]
- # Check that non-admin is not able to update the attached
- # volume
- self.admin_unauthorized_contexts = [
- self.system_member_context,
- self.system_reader_context,
- self.system_foo_context,
- self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
-
- self.reader_authorized_contexts = [
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+
+        # With legacy rules and no scope checks, all admins, project
+        # members, project readers, or any other project role (because the
+        # legacy rule allows the resource owner, i.e. same project id with
+        # no role check) are able to get the volume attachment.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
+
+        # By default, legacy rules are enabled and scope checks are
+        # disabled, so system admin, legacy admin, and project admin are
+        # able to update the volume attachment with a different volumeId.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_reader_context, self.system_member_context,
- self.project_admin_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def test_index_volume_attach_policy(self, mock_get_instance):
rule_name = self.policy_root % "index"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, FAKE_UUID)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, FAKE_UUID)
def test_show_volume_attach_policy(self):
rule_name = self.policy_root % "show"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, FAKE_UUID, FAKE_UUID_A)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, FAKE_UUID, FAKE_UUID_A)
@mock.patch('nova.compute.api.API.attach_volume')
def test_create_volume_attach_policy(self, mock_attach_volume):
rule_name = self.policy_root % "create"
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
'device': '/dev/fake'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, FAKE_UUID, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, FAKE_UUID, body=body)
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
def test_update_volume_attach_policy(self, mock_bdm_save):
@@ -171,28 +147,25 @@ class VolumeAttachPolicyTest(base.BasePolicyTest):
body = {'volumeAttachment': {
'volumeId': FAKE_UUID_A,
'delete_on_termination': True}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller.update,
- req, FAKE_UUID,
- FAKE_UUID_A, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.update,
+ req, FAKE_UUID,
+ FAKE_UUID_A, body=body)
@mock.patch('nova.compute.api.API.detach_volume')
def test_delete_volume_attach_policy(self, mock_detach_volume):
rule_name = self.policy_root % "delete"
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, FAKE_UUID, FAKE_UUID_A)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, FAKE_UUID, FAKE_UUID_A)
@mock.patch('nova.compute.api.API.swap_volume')
def test_swap_volume_attach_policy(self, mock_swap_volume):
rule_name = self.policy_root % "swap"
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, FAKE_UUID, FAKE_UUID_A, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, FAKE_UUID, FAKE_UUID_A, body=body)
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
@mock.patch('nova.compute.api.API.swap_volume')
@@ -225,14 +198,31 @@ class VolumeAttachPolicyTest(base.BasePolicyTest):
req = fakes.HTTPRequest.blank('', version='2.85')
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
'delete_on_termination': True}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- req, FAKE_UUID, FAKE_UUID_A, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ req, FAKE_UUID, FAKE_UUID_A, body=body)
mock_swap_volume.assert_called()
mock_bdm_save.assert_called()
+class VolumeAttachNoLegacyNoScopePolicyTest(VolumeAttachPolicyTest):
+ """Test volume attachment APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(VolumeAttachNoLegacyNoScopePolicyTest, self).setUp()
+        # With no legacy rules, only admin, member, or reader will be
+        # able to perform volume attachment operations on their own
+        # project.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+
class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
"""Test os-volume-attachments APIs policies with system scope enabled.
@@ -248,77 +238,33 @@ class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
super(VolumeAttachScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to update the attached volume
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to update
- # the attached volume.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+        # Enabling scope checks will not allow system admin to perform
+        # volume attachment operations.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class VolumeAttachNoLegacyPolicyTest(VolumeAttachPolicyTest):
+
+class VolumeAttachScopeTypeNoLegacyPolicyTest(VolumeAttachScopeTypePolicyTest):
"""Test os-volume-attachments APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to access
- system_admin_or_owner APIs.
+ and no legacy deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(VolumeAttachNoLegacyPolicyTest, self).setUp()
+ super(VolumeAttachScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to
- # list/create/show/delete the attached volume.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context,
- self.project_member_context
- ]
-
- # Check that non-system and non-admin/owner is not able to
- # list/create/show/delete the attached volume.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context, self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that admin is able to update the attached volume
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non-admin is not able to update the attached
- # volume
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_reader_context,
- self.system_member_context, self.project_admin_context,
- self.project_reader_context, self.project_member_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+        # With scope checks enabled and no legacy rules, system users are
+        # not allowed, and project admin/member/reader will be able to
+        # perform volume attachment operations on their own project.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class VolumesPolicyTest(base.BasePolicyTest):
@@ -336,14 +282,23 @@ class VolumesPolicyTest(base.BasePolicyTest):
self.snapshot_ctlr = volumes_v21.SnapshotController()
self.req = fakes.HTTPRequest.blank('')
self.controller._translate_volume_summary_view = mock.MagicMock()
- # Check that everyone is able to perform crud operations
+ # Everyone will be able to perform crud operations
# on volume and volume snapshots.
# NOTE: Nova cannot verify the volume/snapshot owner during nova policy
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of volume/snapshot then cinder will be returning the appropriate
# error.
- self.everyone_authorized_contexts = [
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -352,94 +307,133 @@ class VolumesPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.everyone_unauthorized_contexts = []
@mock.patch('nova.volume.cinder.API.get_all')
def test_list_volumes_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ rule_name = "os_compute_api:os-volumes:list"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.volume.cinder.API.get_all')
def test_list_detail_volumes_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.detail,
- self.req)
+ rule_name = "os_compute_api:os-volumes:detail"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.detail,
+ self.req)
@mock.patch('nova.volume.cinder.API.get')
def test_show_volume_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:show"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.api.openstack.compute.volumes.'
'_translate_volume_detail_view')
@mock.patch('nova.volume.cinder.API.create')
def test_create_volumes_policy(self, mock_create, mock_view):
- rule_name = "os_compute_api:os-volumes"
+ rule_name = "os_compute_api:os-volumes:create"
body = {"volume": {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}}
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, body=body)
@mock.patch('nova.volume.cinder.API.delete')
def test_delete_volume_policy(self, mock_delete):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:delete"
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, uuids.fake_id)
@mock.patch('nova.volume.cinder.API.get_all_snapshots')
def test_list_snapshots_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.index,
- self.req)
+ rule_name = "os_compute_api:os-volumes:snapshots:list"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.snapshot_ctlr.index,
+ self.req)
@mock.patch('nova.volume.cinder.API.get_all_snapshots')
def test_list_detail_snapshots_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.detail,
- self.req)
+ rule_name = "os_compute_api:os-volumes:snapshots:detail"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.snapshot_ctlr.detail,
+ self.req)
@mock.patch('nova.volume.cinder.API.get_snapshot')
def test_show_snapshot_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.show,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:snapshots:show"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.snapshot_ctlr.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.volume.cinder.API.create_snapshot')
def test_create_snapshot_policy(self, mock_create):
- rule_name = "os_compute_api:os-volumes"
+ rule_name = "os_compute_api:os-volumes:snapshots:create"
body = {"snapshot": {"volume_id": uuids.fake_id}}
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.snapshot_ctlr.create,
+ self.req, body=body)
@mock.patch('nova.volume.cinder.API.delete_snapshot')
def test_delete_snapshot_policy(self, mock_delete):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.delete,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:snapshots:delete"
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.snapshot_ctlr.delete,
+ self.req, uuids.fake_id)
+
+
+class VolumesNoLegacyNoScopePolicyTest(VolumesPolicyTest):
+ """Test Volume APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ v_policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
+
+ def setUp(self):
+ super(VolumesNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, project users with roles other than admin,
+ # member, or reader (e.g. the foo role) will not be able to operate
+ # on volumes and snapshots.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class VolumesScopeTypePolicyTest(VolumesPolicyTest):
@@ -456,3 +450,65 @@ class VolumesScopeTypePolicyTest(VolumesPolicyTest):
def setUp(self):
super(VolumesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users will not be able to
+ # operate on volumes and snapshots.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+
+
+class VolumesScopeTypeNoLegacyPolicyTest(VolumesScopeTypePolicyTest):
+ """Test Volume APIs policies with system scope enabled,
+ and no legacy deprecated rules.
+ """
+ without_deprecated_rules = True
+
+ rules_without_deprecation = {
+ v_policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
+
+ def setUp(self):
+ super(VolumesScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+ # With no legacy rules and scope enabled, system users and project
+ # roles other than admin, member, or reader (e.g. foo) will not be
+ # able to operate on volumes and snapshots.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.other_project_reader_context,
+ self.other_project_member_context
+ ]
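A note on the two rules_without_deprecation tables above: each key is built from the same POLICY_NAME template that produces the rule names the tests assert, so the overrides line up one-to-one with the per-operation rules. A minimal sketch of the expansion, assuming v_policies.POLICY_NAME is the 'os_compute_api:os-volumes:%s' template those rule names imply:

    POLICY_NAME = 'os_compute_api:os-volumes:%s'

    assert POLICY_NAME % 'list' == 'os_compute_api:os-volumes:list'
    assert (POLICY_NAME % 'snapshots:show'
            == 'os_compute_api:os-volumes:snapshots:show')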
diff --git a/nova/tests/unit/privsep/test_fs.py b/nova/tests/unit/privsep/test_fs.py
index 89062acce9..919b6c553d 100644
--- a/nova/tests/unit/privsep/test_fs.py
+++ b/nova/tests/unit/privsep/test_fs.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.privsep.fs
from nova import test
diff --git a/nova/tests/unit/privsep/test_idmapshift.py b/nova/tests/unit/privsep/test_idmapshift.py
index 2b5acbe33c..7c6f7833ff 100644
--- a/nova/tests/unit/privsep/test_idmapshift.py
+++ b/nova/tests/unit/privsep/test_idmapshift.py
@@ -13,9 +13,9 @@
# limitations under the License.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
import nova.privsep.idmapshift
from nova import test
diff --git a/nova/tests/unit/privsep/test_libvirt.py b/nova/tests/unit/privsep/test_libvirt.py
index 32d375bb1c..eebcf6c231 100644
--- a/nova/tests/unit/privsep/test_libvirt.py
+++ b/nova/tests/unit/privsep/test_libvirt.py
@@ -15,8 +15,9 @@
# under the License.
import binascii
+from unittest import mock
+
import ddt
-import mock
import os
import nova.privsep.libvirt
diff --git a/nova/tests/unit/privsep/test_linux_net.py b/nova/tests/unit/privsep/test_linux_net.py
index 5bdac6ca02..6b226359c3 100644
--- a/nova/tests/unit/privsep/test_linux_net.py
+++ b/nova/tests/unit/privsep/test_linux_net.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from oslo_concurrency import processutils
diff --git a/nova/tests/unit/privsep/test_path.py b/nova/tests/unit/privsep/test_path.py
index 1b4955837d..853ee01d09 100644
--- a/nova/tests/unit/privsep/test_path.py
+++ b/nova/tests/unit/privsep/test_path.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os
+from unittest import mock
+
import tempfile
from nova import exception
diff --git a/nova/tests/unit/privsep/test_qemu.py b/nova/tests/unit/privsep/test_qemu.py
index 85c48aa4ae..f3fe5599f2 100644
--- a/nova/tests/unit/privsep/test_qemu.py
+++ b/nova/tests/unit/privsep/test_qemu.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.privsep.qemu
from nova import test
diff --git a/nova/tests/unit/privsep/test_utils.py b/nova/tests/unit/privsep/test_utils.py
index 84d0767c29..887e6dfa8b 100644
--- a/nova/tests/unit/privsep/test_utils.py
+++ b/nova/tests/unit/privsep/test_utils.py
@@ -13,8 +13,8 @@
# under the License.
import errno
-import mock
import os
+from unittest import mock
import nova.privsep.utils
from nova import test
diff --git a/nova/tests/unit/scheduler/client/test_query.py b/nova/tests/unit/scheduler/client/test_query.py
index f8ea4aa337..fe23cf88e3 100644
--- a/nova/tests/unit/scheduler/client/test_query.py
+++ b/nova/tests/unit/scheduler/client/test_query.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index a6097cf164..40ebac9af9 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -9,13 +9,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import copy
+import ddt
import time
+from unittest import mock
from urllib import parse
import fixtures
from keystoneauth1 import exceptions as ks_exc
-import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -41,8 +43,14 @@ class SafeConnectedTestCase(test.NoDBTestCase):
super(SafeConnectedTestCase, self).setUp()
self.context = context.get_admin_context()
- with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'):
- self.client = report.SchedulerReportClient()
+ # need to mock this globally as SchedulerReportClient._create_client
+ # is called again when EndpointNotFound is raised
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ 'keystoneauth1.loading.load_auth_from_conf_options',
+ mock.MagicMock()))
+
+ self.client = report.SchedulerReportClient()
@mock.patch('keystoneauth1.session.Session.request')
def test_missing_endpoint(self, req):
@@ -150,6 +158,60 @@ class SafeConnectedTestCase(test.NoDBTestCase):
self.assertTrue(req.called)
+@ddt.ddt
+class TestSingleton(test.NoDBTestCase):
+ def test_singleton(self):
+ # Make sure we start with a clean slate
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Make sure the first call creates the singleton, sets it
+ # globally, and returns it
+ client = report.report_client_singleton()
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure that a subsequent call returns the same thing
+ # again and that the global is unchanged
+ self.assertEqual(client, report.report_client_singleton())
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ ks_exc.DiscoveryFailure,
+ ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ test.TestingException)
+ def test_errors(self, exc):
+ self._test_error(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_error(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ self.assertRaises(exc, report.report_client_singleton)
+ mock_log.error.assert_called_once()
+
+ def test_error_then_success(self):
+ # Simulate an error
+ self._test_error(ks_exc.ConnectFailure)
+
+ # Ensure we did not set the global client
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Call again, with no error
+ client = report.report_client_singleton()
+
+ # Make sure we got a client and that it was set as the global
+ # one
+ self.assertIsNotNone(client)
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure we keep getting the same one
+ client2 = report.report_client_singleton()
+ self.assertEqual(client, client2)
+
+
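TestSingleton above pins down the contract of report_client_singleton(): the first successful call caches the client in the module-global PLACEMENTCLIENT, a failed creation logs an error and re-raises without touching the global, and a later call may still succeed. A minimal sketch of that contract, with a stand-in class in place of the real SchedulerReportClient:

    import logging

    LOG = logging.getLogger(__name__)

    PLACEMENTCLIENT = None

    class SchedulerReportClient:  # stand-in for the real report client
        pass

    def report_client_singleton():
        """Return the shared report client, creating it on first use."""
        global PLACEMENTCLIENT
        if PLACEMENTCLIENT is None:
            try:
                PLACEMENTCLIENT = SchedulerReportClient()
            except Exception:
                # leave the global unset so the next call can retry
                LOG.error('Failed to create the placement client')
                raise
        return PLACEMENTCLIENT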
class TestConstructor(test.NoDBTestCase):
def setUp(self):
super(TestConstructor, self).setUp()
@@ -4637,3 +4699,31 @@ class TestUsages(SchedulerReportClientTestCase):
expected = {'project': {'cores': 4, 'ram': 0},
'user': {'cores': 4, 'ram': 0}}
self.assertDictEqual(expected, counts)
+
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
+ def test_get_usages_counts_for_limits(self, mock_get):
+ fake_responses = fake_requests.FakeResponse(
+ 200,
+ content=jsonutils.dumps({'usages': {orc.VCPU: 2, orc.PCPU: 2}}))
+ mock_get.return_value = fake_responses
+
+ counts = self.client.get_usages_counts_for_limits(
+ self.context, 'fake-project')
+
+ expected = {orc.VCPU: 2, orc.PCPU: 2}
+ self.assertDictEqual(expected, counts)
+ self.assertEqual(1, mock_get.call_count)
+
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
+ def test_get_usages_counts_for_limits_fails(self, mock_get):
+ fake_failure_response = fake_requests.FakeResponse(500)
+ mock_get.side_effect = [ks_exc.ConnectFailure, fake_failure_response]
+
+ e = self.assertRaises(exception.UsagesRetrievalFailed,
+ self.client.get_usages_counts_for_limits,
+ self.context, 'fake-project')
+
+ expected = "Failed to retrieve usages for project 'fake-project' " \
+ "and user 'N/A'."
+ self.assertEqual(expected, str(e))
+ self.assertEqual(2, mock_get.call_count)
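The failure test above pins down two behaviours: get_usages_counts_for_limits retries once after a keystoneauth ConnectFailure (hence the two recorded calls) and translates a non-200 response into UsagesRetrievalFailed with the asserted message. A hedged sketch of that control flow, with builtin stand-ins for the real keystoneauth and nova exception classes:

    def get_usages_counts_for_limits(client_get, project_id):
        """Retry once on a connection failure, then translate errors."""
        resp = None
        for attempt in range(2):  # at most one retry
            try:
                resp = client_get('/usages?project_id=' + project_id)
                break
            except ConnectionError:  # stand-in for ks_exc.ConnectFailure
                continue
        if resp is None or resp.status_code != 200:
            # stand-in for exception.UsagesRetrievalFailed
            raise RuntimeError(
                "Failed to retrieve usages for project %r and user 'N/A'."
                % project_id)
        return resp.json()['usages']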
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index 658c82c20e..f5dcf87e4a 100644
--- a/nova/tests/unit/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -34,6 +34,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
@@ -46,6 +47,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
diff --git a/nova/tests/unit/scheduler/filters/test_affinity_filters.py b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
index 45c4d9834c..778fbd9073 100644
--- a/nova/tests/unit/scheduler/filters/test_affinity_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
index f17a7168f1..09b8d728b2 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
index 3567d85a62..971e1a366c 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
index 6e6ae9a421..7f2f75a5bd 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
diff --git a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
index 2c1a43225e..38a75452ba 100644
--- a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import availability_zone_filter
diff --git a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
index a90ec4367d..cbb8c31601 100644
--- a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
@@ -48,7 +48,7 @@ class TestComputeCapabilitiesFilter(test.NoDBTestCase):
flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
self.assertFalse(self.filt_cls.host_passes(None, spec_obj))
- def test_compute_filter_fails_without_capabilites(self):
+ def test_compute_filter_fails_without_capabilities(self):
cpu_info = """ { } """
cpu_info = str(cpu_info)
diff --git a/nova/tests/unit/scheduler/filters/test_compute_filters.py b/nova/tests/unit/scheduler/filters/test_compute_filters.py
index d9cee4c410..335b9d07be 100644
--- a/nova/tests/unit/scheduler/filters/test_compute_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_compute_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import compute_filter
diff --git a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
index fd0dc3aca1..3b06aaf069 100644
--- a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
@@ -11,7 +11,7 @@
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import io_ops_filter
diff --git a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
index 070cc3a785..b43a9b1dc1 100644
--- a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import num_instances_filter
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
index 0ebe95d5e4..ba9073e0df 100644
--- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -11,6 +11,7 @@
# under the License.
import itertools
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -53,7 +54,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
@@ -132,7 +135,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
- 'ram_allocation_ratio': 1.3})
+ 'ram_allocation_ratio': 1.3,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
@@ -180,7 +185,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': numa_topology,
'pci_stats': None,
'cpu_allocation_ratio': 1,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
assertion = self.assertTrue if passes else self.assertFalse
# test combinations of image properties and extra specs
@@ -237,7 +244,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
@@ -287,7 +296,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': host_topology,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
def test_numa_topology_filter_pass_networks(self):
host = self._get_fake_host_state_with_networks()
@@ -329,3 +340,79 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
network_metadata=network_metadata)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filters_candidates(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 3 candidates for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+ # and that from those candidates only the second matches the numa logic
+ mock_numa_fit.side_effect = [False, True, False]
+
+ # run the filter and expect that the host passes as it has at least
+ # one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ # also assert that the filter checked all three candidates
+ self.assertEqual(3, len(mock_numa_fit.mock_calls))
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filter_fails_if_no_matching_candidate_left(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 1 candidate for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+ # simulate that the only candidate we have does not match
+ mock_numa_fit.side_effect = [False]
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(1, len(mock_numa_fit.mock_calls))
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
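Taken together, the two tests above fix the shape of the filter's candidate handling: numa_fit_instance_to_host is tried once per allocation candidate, the host's allocation_candidates list is narrowed to the candidates that fit, and the host fails when none remain. A minimal sketch of that keep-or-drop loop (a hypothetical helper; the real logic lives in NUMATopologyFilter.host_passes):

    def filter_candidates(host_state, fits_candidate):
        """Narrow host_state.allocation_candidates to the viable ones."""
        viable = [
            candidate
            for candidate in host_state.allocation_candidates
            if fits_candidate(candidate)
        ]
        host_state.allocation_candidates = viable
        # the host passes only while at least one candidate remains
        return bool(viable)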
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
index c500b4a887..27d80b884e 100644
--- a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -10,7 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.pci import stats
@@ -33,11 +35,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_fail(self):
pci_stats_mock = mock.MagicMock()
@@ -47,11 +54,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_no_pci_request(self):
spec_obj = objects.RequestSpec(pci_requests=None)
@@ -82,3 +94,92 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
host = fakes.FakeHostState('host1', 'node1',
attribute_dict={'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ def test_filters_candidates(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that only the second allocation candidate fits
+ pci_stats_mock.support_requests.side_effect = [False, True, False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned 3 possible candidates
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it passes the host as there is at
+ # least one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked all three candidates
+ pci_stats_mock.support_requests.assert_has_calls(
+ [
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_2"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_3"]},
+ ),
+ ]
+ )
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ def test_filter_fails_if_no_matching_candidate_left(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that the only candidate we have does not match
+ pci_stats_mock.support_requests.side_effect = [False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned a single candidate
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked our candidate
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ )
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
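The same keep-or-drop candidate loop sketched after the NUMA filter tests applies here; the assertions above additionally pin down the per-candidate predicate, pci_stats.support_requests(requests.requests, provider_mapping=candidate['mappings']), so a host passes only if at least one candidate's placement mapping can satisfy its PCI requests.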
diff --git a/nova/tests/unit/scheduler/filters/test_type_filters.py b/nova/tests/unit/scheduler/filters/test_type_filters.py
index d3f01a5c0e..c2567b5205 100644
--- a/nova/tests/unit/scheduler/filters/test_type_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_type_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import type_filter
diff --git a/nova/tests/unit/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
index cb1c3ec32b..64f4121eb0 100644
--- a/nova/tests/unit/scheduler/test_filters.py
+++ b/nova/tests/unit/scheduler/test_filters.py
@@ -16,8 +16,8 @@ Tests For Scheduler Host Filters.
"""
import inspect
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import filters
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index 5a1e665be3..1a7daa515f 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -19,8 +19,8 @@ Tests For HostManager
import collections
import contextlib
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
@@ -1562,10 +1562,14 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
- numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
- fake_numa_topology,
- limits=None, pci_requests=None,
- pci_stats=None)
+ numa_fit_mock.assert_called_once_with(
+ fake_host_numa_topology,
+ fake_numa_topology,
+ limits=None,
+ pci_requests=None,
+ pci_stats=None,
+ provider_mapping=None,
+ )
numa_usage_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 70689f6047..e992fe6034 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -17,7 +17,9 @@
Tests For Scheduler
"""
-import mock
+from unittest import mock
+
+from keystoneauth1 import exceptions as ks_exc
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -25,6 +27,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova.scheduler import utils as scheduler_utils
@@ -395,9 +398,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, aggregates=[])
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -458,20 +468,29 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=group)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
+ instance_group=group,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ limits={},
+ cell_uuid=uuids.cell,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.manager._schedule(ctx, spec_obj,
- instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
- mock.sentinel.provider_summaries)
+ instance_uuids, None, mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -509,14 +528,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance]
@@ -582,11 +611,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
project_id=uuids.project_id,
instance_group=None)
- host_state = mock.Mock(spec=host_manager.HostState,
- host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host=mock.sentinel.host,
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = False
instance_uuids = [uuids.instance]
@@ -603,7 +637,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
- mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
+ mock_get_hosts.assert_called_once_with(spec_obj, mock.ANY, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.manager.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
@@ -634,18 +668,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, updated='fake')
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ updated="fake",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [
- all_host_states, # first instance: return all the hosts (only one)
- [], # second: act as if no more hosts that meet criteria
- all_host_states, # the final call when creating alternates
- ]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ # first instance: return all the hosts (only one)
+ if c == 0:
+ return hosts
+ # second: act as if no more hosts meet the criteria
+ elif c == 1:
+ return []
+ # the final call when creating alternates
+ elif c == 2:
+ return hosts
+ else:
+ raise StopIteration()
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
@@ -678,20 +735,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -743,20 +824,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -813,17 +918,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=ig, instance_uuid=uuids.instance0)
+ instance_group=ig,
+ instance_uuid=uuids.instance0,
+ requested_resources=[],
+ )
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
- nodename="node1", limits={}, uuid=uuids.cn1,
- cell_uuid=uuids.cell1, instances={}, aggregates=[])
- hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
- nodename="node2", limits={}, uuid=uuids.cn2,
- cell_uuid=uuids.cell2, instances={}, aggregates=[])
+ hs1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host1",
+ nodename="node1",
+ limits={},
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ hs2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host2",
+ nodename="node2",
+ limits={},
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell2,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
@@ -837,13 +961,18 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# _get_sorted_hosts() in the two iterations for each instance in
# num_instances
visited_instances = set([])
+ get_sorted_hosts_called_with_host_states = []
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
- return [hs1, hs2]
- return [hs2, hs1]
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return s
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return reversed(s)
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
@@ -870,10 +999,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
- mock.call(spec_obj, all_host_states, 0),
- mock.call(spec_obj, [hs2, hs1], 1),
+ mock.call(spec_obj, mock.ANY, 0),
+ mock.call(spec_obj, mock.ANY, 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
+ self.assertEqual(
+ all_host_states, get_sorted_hosts_called_with_host_states[0])
+ self.assertEqual(
+ [hs1], get_sorted_hosts_called_with_host_states[1])
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
@@ -1167,14 +1300,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, updated="Not None")
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ updated="Not None",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [all_host_states, []]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return []
+ else:
+ raise StopIteration
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
@@ -1203,7 +1358,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1211,14 +1366,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1269,11 +1424,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
+
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
- mock_sorted.side_effect = [all_host_states,
- list(reversed(all_host_states)),
- all_host_states]
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return list(reversed(all_host_states))
+ elif c == 2:
+ return list(hosts)
+ else:
+ raise StopIteration()
+
+ mock_sorted.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1281,14 +1449,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1322,7 +1490,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
@@ -1330,14 +1498,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1520,3 +1688,541 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager._discover_hosts_in_cells(mock.sentinel.context)
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch.object(manager, 'LOG')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client(self, mock_rpc, mock_sg, mock_hm,
+ mock_log, mock_report):
+ # Simulate keystone or placement being offline at startup
+ mock_report.side_effect = ks_exc.RequestTimeout
+ mgr = manager.SchedulerManager()
+ mock_report.assert_called_once_with()
+ self.assertTrue(mock_log.warning.called)
+
+ # Make sure we're raising the actual error to subsequent callers
+ self.assertRaises(ks_exc.RequestTimeout, lambda: mgr.placement_client)
+
+ # Simulate recovery of the keystone or placement service
+ mock_report.reset_mock(side_effect=True)
+ mgr.placement_client
+ mock_report.assert_called_once_with()
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client_failures(self, mock_rpc, mock_sg,
+ mock_hm, mock_report):
+ # Certain keystoneclient exceptions are fatal
+ mock_report.side_effect = ks_exc.Unauthorized
+ self.assertRaises(ks_exc.Unauthorized, manager.SchedulerManager)
+
+ # Anything else is fatal
+ mock_report.side_effect = test.TestingException
+ self.assertRaises(test.TestingException, manager.SchedulerManager)
+
+
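The two startup tests above describe a lazy-initialization pattern: a transient keystone/placement outage at startup is logged and tolerated, a fatal error such as Unauthorized aborts startup, and each later placement_client access retries and re-raises the real error until the service recovers. A minimal sketch of that pattern, reusing the report_client_singleton() contract sketched earlier and a stand-in retryable-exception tuple:

    import logging

    LOG = logging.getLogger(__name__)

    RETRYABLE = (TimeoutError,)  # stand-in for the retryable ks_exc classes

    def report_client_singleton():
        return object()  # placeholder factory; may raise during an outage

    class SchedulerManager:
        def __init__(self):
            try:
                report_client_singleton()  # warm the client eagerly
            except RETRYABLE as exc:
                # tolerated: placement/keystone may be down at startup
                LOG.warning('Unable to init placement client: %s', exc)
            # any other exception (e.g. Unauthorized) propagates, so
            # startup fails fast on non-transient errors

        @property
        def placement_client(self):
            # re-raises the real error on each access until it succeeds
            return report_client_singleton()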
+class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
+
+ class ACRecorderFilter(filters.BaseHostFilter):
+ """A filter that records what allocation candidates it saw on each host
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.seen_candidates = []
+
+ def host_passes(self, host_state, filter_properties):
+ # record what candidate the filter saw for each host
+ self.seen_candidates.append(list(host_state.allocation_candidates))
+ return True
+
+ class DropFirstFilter(filters.BaseHostFilter):
+ """A filter that removes one candidate and keeps the rest on each
+ host
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates.pop(0)
+ return bool(host_state.allocation_candidates)
+
+ @mock.patch.object(
+ host_manager.HostManager, '_init_instance_info', new=mock.Mock())
+ @mock.patch.object(
+ host_manager.HostManager, '_init_aggregates', new=mock.Mock())
+ def setUp(self):
+ super().setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.manager = manager.SchedulerManager()
+ self.manager.host_manager.weighers = []
+ self.request_spec = objects.RequestSpec(
+ ignore_hosts=[],
+ force_hosts=[],
+ force_nodes=[],
+ requested_resources=[],
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_see_allocation_candidates_for_each_host(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # have a single filter configured where we can assert that the filter
+ # sees the allocation_candidates of each host
+ filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [filter]
+
+ instance_uuids = [uuids.inst1]
+
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts with different candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ mock.sentinel.host1_a_c_1,
+ mock.sentinel.host1_a_c_2,
+ ]
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ mock.sentinel.host2_a_c_1,
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ mock.sentinel.allocation_request_version,
+ )
+
+ # we expect that our filter saw the allocation candidate list of
+ # each host respectively
+ self.assertEqual(
+ [
+ alloc_reqs_by_rp_uuid[uuids.host1],
+ alloc_reqs_by_rp_uuid[uuids.host2],
+ ],
+ filter.seen_candidates,
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_scheduler_selects_filtered_a_c_from_hosts_state(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ """Assert that if a filter removes an allocation candidate from a host
+ then even if that host is selected the removed allocation candidate
+ is not used by the scheduler.
+ """
+
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we have requested one instance to be scheduled so expect one set
+ # of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(
+ "host1-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consecutive_filter_sees_filtered_a_c_list(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # create two filters
+ # 1) DropFirstFilter runs first and drops the first candidate from each
+ # host
+ # 2) ACRecorderFilter runs next and records what candidates it saw
+ recorder_filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [
+ self.DropFirstFilter(),
+ recorder_filter,
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ # we expect that the second filter saw one host with one candidate,
+ # as candidate1 was already filtered out by the run of the first filter
+ self.assertEqual(
+ [["host1-candidate2"]],
+ recorder_filter.seen_candidates
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_removes_all_a_c_host_is_not_selected(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ ]
+ # second with two candidates
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ "host2-candidate1",
+ "host2-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we expect that the first host is not selected as the filter
+ # removed every candidate from the host
+ # also we expect that on the second host only candidate2 could have
+ # been selected
+ # we asked for one instance, so we expect one set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(uuids.host2, selection.compute_node_uuid)
+ self.assertEqual(
+ "host2-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consume_selected_host_sees_updated_request_spec(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # simulate that nothing is filtered out, by not having any filters
+ self.manager.host_manager.enabled_filters = []
+
+ # set up the request spec with a request group to be updated
+ # by the selected candidate
+ self.request_spec.requested_resources = [
+ objects.RequestGroup(
+ requester_id=uuids.group_req1, provider_uuids=[]
+ )
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a single host with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ # simulate that placement fulfilled the above RequestGroup from
+ # a certain child RP of the host.
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child_rp],
+ }
+ }
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ # make asserts on the request_spec passed to consume
+ def assert_request_spec_updated_with_selected_candidate(
+ selected_host, spec_obj, instance_uuid=None
+ ):
+ # we expect that the scheduler updated the request_spec based
+ # on the selected candidate before calling consume
+ self.assertEqual(
+ [uuids.host1_child_rp],
+ spec_obj.requested_resources[0].provider_uuids,
+ )
+
+ mock_consume.side_effect = (
+ assert_request_spec_updated_with_selected_candidate)
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ mock_consume.assert_called_once()
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_main_selection_with_claimed_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that the first (a.k.a main) selection returned for an
+ instance always maps to the allocation candidate, that was claimed by
+ the scheduler in placement.
+ """
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have one host with 3 candidates, each fulfilling the request group
+ # from a different child RP
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ # This is odd but the unnamed request group uses "" as
+ # the name of the group.
+ "": [uuids.host1],
+ uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
+ }
+ } for i in [1, 2, 3]
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # only one host is available so no alternates are possible
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ self.assertEqual(uuids.host1, selection.compute_node_uuid)
+ # we expect that host1_child2 candidate is selected as the
+ # DropFirstFilter will drop host1_child1
+ expected_a_c = {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child2],
+ }
+ }
+ self.assertEqual(
+ expected_a_c,
+ jsonutils.loads(selection.allocation_request),
+ )
+ # and we expect that the same candidate was claimed in placement
+ mock_claim.assert_called_once_with(
+ mock.ANY,
+ self.manager.placement_client,
+ self.request_spec,
+ uuids.inst1,
+ expected_a_c,
+ allocation_request_version="fake-alloc-req-version",
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_alts_with_filtered_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that alternate generation also works based on filtered
+ candidates.
+ """
+
+ class RPFilter(filters.BaseHostFilter):
+ """A filter that only allows candidates with specific RPs"""
+
+ def __init__(self, allowed_rp_uuids):
+ self.allowed_rp_uuids = allowed_rp_uuids
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates = [
+ a_c
+ for a_c in host_state.allocation_candidates
+ if a_c["mappings"][uuids.group_req1][0]
+ in self.allowed_rp_uuids
+ ]
+ return True
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have 3 hosts each with 2 allocation candidates fulfilling a request
+ # group from a different child RP
+ hosts = []
+ for i in [1, 2, 3]:
+ host = host_manager.HostState(f"host{i}", f"node{i}", uuids.cell1)
+ host.uuid = getattr(uuids, f"host{i}")
+ alloc_reqs_by_rp_uuid[host.uuid] = [
+ {
+ "mappings": {
+ "": [host.uuid],
+ uuids.group_req1: [
+ getattr(uuids, f"host{i}_child{j}")
+ ],
+ }
+ }
+ for j in [1, 2]
+ ]
+ hosts.append(host)
+ mock_get_all_host_states.return_value = iter(hosts)
+
+ # configure a filter that only "likes" host1_child2 and host3_child2
+ # RPs. This means host2 is totally out and host1 and host3 only have
+ # one viable candidate
+ self.manager.host_manager.enabled_filters = [
+ RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
+ ]
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we expect a main selection and a single alternative
+ # (host1 and host3); on both selections we expect child2 as the
+ # selected candidate
+ self.assertEqual(2, len(selections))
+ main_selection = selections[0]
+ self.assertEqual(uuids.host1, main_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host1_child2],
+ jsonutils.loads(main_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
+
+ alt_selection = selections[1]
+ self.assertEqual(uuids.host3, alt_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host3_child2],
+ jsonutils.loads(alt_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
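The two helper filters these tests install, ACRecorderFilter and DropFirstFilter, are defined earlier on the test class and fall outside this hunk. Inferred from the assertions above, they presumably look roughly like the following sketch (relying on the same `filters` import the in-test RPFilter uses); the bodies are an assumption, not a copy of the real definitions:

    class ACRecorderFilter(filters.BaseHostFilter):
        """Records the allocation candidates of each host it filters."""

        def __init__(self):
            super().__init__()
            self.seen_candidates = []

        def host_passes(self, host_state, filter_properties):
            # copy the list so later mutation by other filters stays
            # invisible to the recording
            self.seen_candidates.append(
                list(host_state.allocation_candidates))
            return True


    class DropFirstFilter(filters.BaseHostFilter):
        """Drops the first allocation candidate from every host."""

        def host_passes(self, host_state, filter_properties):
            host_state.allocation_candidates.pop(0)
            # a host left without candidates cannot be selected
            return bool(host_state.allocation_candidates)

Together the tests pin down the scheduler side of the contract: each HostState is seeded with the candidates placement returned for its root provider, filters may shrink (or empty) that list, and the first surviving candidate is the one claimed via scheduler utils.claim_resources and serialized into Selection.allocation_request.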
diff --git a/nova/tests/unit/scheduler/test_request_filter.py b/nova/tests/unit/scheduler/test_request_filter.py
index 7be7f8341d..77e538006a 100644
--- a/nova/tests/unit/scheduler/test_request_filter.py
+++ b/nova/tests/unit/scheduler/test_request_filter.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import os_traits as ot
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -406,13 +406,15 @@ class TestRequestFilter(test.NoDBTestCase):
self.assertIn('took %.1f seconds', log_lines[1])
@mock.patch.object(request_filter, 'LOG', new=mock.Mock())
- def test_transform_image_metadata(self):
+ def test_transform_image_metadata_x86(self):
self.flags(image_metadata_prefilter=True, group='scheduler')
properties = objects.ImageMetaProps(
hw_disk_bus=objects.fields.DiskBus.SATA,
hw_cdrom_bus=objects.fields.DiskBus.IDE,
hw_video_model=objects.fields.VideoModel.QXL,
- hw_vif_model=network_model.VIF_MODEL_VIRTIO
+ hw_vif_model=network_model.VIF_MODEL_VIRTIO,
+ hw_architecture=objects.fields.Architecture.X86_64,
+ hw_emulation_architecture=objects.fields.Architecture.AARCH64
)
reqspec = objects.RequestSpec(
image=objects.ImageMeta(properties=properties),
@@ -426,6 +428,36 @@ class TestRequestFilter(test.NoDBTestCase):
'COMPUTE_NET_VIF_MODEL_VIRTIO',
'COMPUTE_STORAGE_BUS_IDE',
'COMPUTE_STORAGE_BUS_SATA',
+ 'HW_ARCH_X86_64',
+ 'COMPUTE_ARCH_AARCH64',
+ }
+ self.assertEqual(expected, reqspec.root_required)
+
+ @mock.patch.object(request_filter, 'LOG', new=mock.Mock())
+ def test_transform_image_metadata_aarch64(self):
+ self.flags(image_metadata_prefilter=True, group='scheduler')
+ properties = objects.ImageMetaProps(
+ hw_disk_bus=objects.fields.DiskBus.SATA,
+ hw_cdrom_bus=objects.fields.DiskBus.IDE,
+ hw_video_model=objects.fields.VideoModel.QXL,
+ hw_vif_model=network_model.VIF_MODEL_VIRTIO,
+ hw_architecture=objects.fields.Architecture.AARCH64,
+ hw_emulation_architecture=objects.fields.Architecture.X86_64
+ )
+ reqspec = objects.RequestSpec(
+ image=objects.ImageMeta(properties=properties),
+ flavor=objects.Flavor(extra_specs={}),
+ )
+ self.assertTrue(
+ request_filter.transform_image_metadata(None, reqspec)
+ )
+ expected = {
+ 'COMPUTE_GRAPHICS_MODEL_QXL',
+ 'COMPUTE_NET_VIF_MODEL_VIRTIO',
+ 'COMPUTE_STORAGE_BUS_IDE',
+ 'COMPUTE_STORAGE_BUS_SATA',
+ 'HW_ARCH_AARCH64',
+ 'COMPUTE_ARCH_X86_64',
}
self.assertEqual(expected, reqspec.root_required)
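The two variants above pin down how the prefilter translates the architecture image properties into placement traits: hw_architecture turns into an HW_ARCH_* trait and hw_emulation_architecture into a COMPUTE_ARCH_* trait. A hedged sketch of just that mapping (an illustrative helper, not the real transform_image_metadata body):

    def _arch_traits(props):
        # props is an objects.ImageMetaProps instance
        traits = set()
        if props.obj_attr_is_set('hw_architecture'):
            # guest architecture -> os-traits HW_ARCH_<ARCH>
            traits.add('HW_ARCH_%s' % props.hw_architecture.upper())
        if props.obj_attr_is_set('hw_emulation_architecture'):
            # emulated architecture -> COMPUTE_ARCH_<ARCH>
            traits.add(
                'COMPUTE_ARCH_%s'
                % props.hw_emulation_architecture.upper())
        return traits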
@@ -580,3 +612,90 @@ class TestRequestFilter(test.NoDBTestCase):
mock_get_aggs_network.assert_has_calls([
mock.call(self.context, mock.ANY, mock.ANY, uuids.net1),
mock.call(self.context, mock.ANY, mock.ANY, uuids.net2)])
+
+ def test_ephemeral_encryption_filter_no_encryption(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ # Assert that the filter returns false and doesn't update the reqspec
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_disabled(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps(
+ hw_ephemeral_encryption=False)))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'False'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_no_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'True'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION}, reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_and_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION,
+ ot.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS},
+ reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
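Taken together, the four tests describe the filter's contract: it is a no-op returning False unless ephemeral encryption is requested via the flavor extra spec or image property, and when requested it adds the COMPUTE_EPHEMERAL_ENCRYPTION trait (plus a format-specific trait such as COMPUTE_EPHEMERAL_ENCRYPTION_LUKS) to root_required. A rough sketch consistent with that contract; the hardware.* helper names are assumptions, not verified nova APIs:

    import os_traits as ot

    from nova.virt import hardware


    def ephemeral_encryption_filter(ctxt, request_spec):
        # assumed helpers resolving the flavor/image request; the
        # authoritative version lives in nova.scheduler.request_filter
        if not hardware.get_ephemeral_encryption_constraint(
                request_spec.flavor, request_spec.image):
            return False
        request_spec.root_required.add(ot.COMPUTE_EPHEMERAL_ENCRYPTION)
        eph_format = hardware.get_ephemeral_encryption_format(
            request_spec.flavor, request_spec.image)
        if eph_format:
            # e.g. 'luks' -> COMPUTE_EPHEMERAL_ENCRYPTION_LUKS
            request_spec.root_required.add(
                getattr(ot, 'COMPUTE_EPHEMERAL_ENCRYPTION_%s'
                        % eph_format.upper()))
        return True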
diff --git a/nova/tests/unit/scheduler/test_rpcapi.py b/nova/tests/unit/scheduler/test_rpcapi.py
index 3c56946975..51582891aa 100644
--- a/nova/tests/unit/scheduler/test_rpcapi.py
+++ b/nova/tests/unit/scheduler/test_rpcapi.py
@@ -16,7 +16,8 @@
Unit Tests for nova.scheduler.rpcapi
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
diff --git a/nova/tests/unit/scheduler/test_utils.py b/nova/tests/unit/scheduler/test_utils.py
index 8aff5b902e..55957f3d55 100644
--- a/nova/tests/unit/scheduler/test_utils.py
+++ b/nova/tests/unit/scheduler/test_utils.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/scheduler/weights/test_weights_affinity.py b/nova/tests/unit/scheduler/weights/test_weights_affinity.py
index 10ec7e698d..3048e9f06c 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_affinity.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_affinity.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler import weights
diff --git a/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
new file mode 100644
index 0000000000..c6e4abd4cd
--- /dev/null
+++ b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler hypervisor version weights.
+"""
+
+from nova.scheduler import weights
+from nova.scheduler.weights import hypervisor_version
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HypervisorVersionWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weighers = [hypervisor_version.HypervisorVersionWeigher()]
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weighers,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ host_values = [
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ]
+ return [fakes.FakeHostState(host, node, values)
+ for host, node, values in host_values]
+
+ def test_multiplier_default(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_default_full_ordering(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_hosts = self.weight_handler.get_weighed_objects(
+ self.weighers, hostinfo_list, {}
+ )
+ expected_hosts = [fakes.FakeHostState(host, node, values)
+ for host, node, values in [
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ]]
+ for actual, expected in zip(
+ weighed_hosts,
+ expected_hosts
+ ):
+ self.assertEqual(actual.obj.host, expected.host)
+
+ def test_multiplier_none(self):
+ multi = 0.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(multi, weighed_host.weight)
+
+ def test_multiplier_positive(self):
+ multi = 2.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * multi, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_negative(self):
+ multi = -1.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual('host1', weighed_host.obj.host)
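These assertions only make sense if the weigher returns the raw hypervisor version and relies on the weight handler's min-max normalization, which is why host4 lands on exactly 1.0 scaled by the multiplier. A hedged sketch of such a weigher; the utils.get_weight_multiplier helper is how nova's other weighers read their (per-aggregate overridable) multiplier and is assumed here:

    import nova.conf
    from nova.scheduler import weights
    from nova.scheduler.weights import utils

    CONF = nova.conf.CONF


    class HypervisorVersionWeigher(weights.BaseHostWeigher):

        def weight_multiplier(self, host_state):
            # [filter_scheduler]/hypervisor_version_weight_multiplier,
            # optionally overridden per host aggregate
            return utils.get_weight_multiplier(
                host_state, 'hypervisor_version_weight_multiplier',
                CONF.filter_scheduler.hypervisor_version_weight_multiplier)

        def _weigh_object(self, host_state, weight_properties):
            # higher hypervisor version -> higher raw weight; the handler
            # normalizes raw weights into [0, 1] before multiplying
            return host_state.hypervisor_version or 0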
diff --git a/nova/tests/unit/scheduler/weights/test_weights_metrics.py b/nova/tests/unit/scheduler/weights/test_weights_metrics.py
index 21667813e3..d507000b12 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_metrics.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_metrics.py
@@ -114,7 +114,7 @@ class MetricsWeigherTestCase(test.NoDBTestCase):
setting = [idle + '=-2', idle + '=1']
self._do_test(setting, 1.0, 'host1')
- def test_single_resourcenegtive_ratio(self):
+ def test_single_resourcenegative_ratio(self):
# host1: idle=512
# host2: idle=1024
# host3: idle=3072
diff --git a/nova/tests/unit/scheduler/weights/test_weights_pci.py b/nova/tests/unit/scheduler/weights/test_weights_pci.py
index d257c67c1f..3bdc94f357 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_pci.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_pci.py
@@ -127,7 +127,7 @@ class PCIWeigherTestCase(test.NoDBTestCase):
"""Test weigher with a PCI device instance and huge hosts.
Ensure that the weigher gracefully degrades when the number of PCI
- devices on the host exceeeds MAX_DEVS.
+ devices on the host exceeds MAX_DEVS.
"""
hosts = [
('host1', 'node1', [500]), # 500 devs
diff --git a/nova/tests/unit/servicegroup/test_api.py b/nova/tests/unit/servicegroup/test_api.py
index b451285e4e..4ded10360a 100644
--- a/nova/tests/unit/servicegroup/test_api.py
+++ b/nova/tests/unit/servicegroup/test_api.py
@@ -15,7 +15,7 @@
"""
Test the base class for the servicegroup API
"""
-import mock
+from unittest import mock
from nova import servicegroup
from nova import test
diff --git a/nova/tests/unit/servicegroup/test_db_servicegroup.py b/nova/tests/unit/servicegroup/test_db_servicegroup.py
index 9e04451ec7..9f718e17b7 100644
--- a/nova/tests/unit/servicegroup/test_db_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_db_servicegroup.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
diff --git a/nova/tests/unit/servicegroup/test_mc_servicegroup.py b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
index 3b8399dfe3..e3896bb375 100644
--- a/nova/tests/unit/servicegroup/test_mc_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
@@ -16,7 +16,7 @@
# under the License.
+from unittest import mock
import iso8601
-import mock
from nova import servicegroup
from nova import test
diff --git a/nova/tests/unit/storage/test_rbd.py b/nova/tests/unit/storage/test_rbd.py
index 396f22c643..f89c2dee89 100644
--- a/nova/tests/unit/storage/test_rbd.py
+++ b/nova/tests/unit/storage/test_rbd.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from eventlet import tpool
-import mock
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -524,7 +524,7 @@ class RbdTestCase(test.NoDBTestCase):
self.driver.destroy_volume(vol)
# Make sure both params have the expected values
- retryctx = mock_loopingcall.call_args.args[3]
+ retryctx = mock_loopingcall.call_args[0][3]
self.assertEqual(retryctx, {'retries': 6})
loopingcall.start.assert_called_with(interval=10)
diff --git a/nova/tests/unit/test_availability_zones.py b/nova/tests/unit/test_availability_zones.py
index 438e8dba24..f2e02e39c7 100644
--- a/nova/tests/unit/test_availability_zones.py
+++ b/nova/tests/unit/test_availability_zones.py
@@ -17,7 +17,8 @@
Tests for availability zones
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from nova import availability_zones as az
diff --git a/nova/tests/unit/test_block_device.py b/nova/tests/unit/test_block_device.py
index f5a4fc5694..40020a203f 100644
--- a/nova/tests/unit/test_block_device.py
+++ b/nova/tests/unit/test_block_device.py
@@ -17,7 +17,8 @@
Tests for Block Device utility functions.
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/test_cache.py b/nova/tests/unit/test_cache.py
index b7059796f1..3f656a49b0 100644
--- a/nova/tests/unit/test_cache.py
+++ b/nova/tests/unit/test_cache.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import cache_utils
from nova import test
diff --git a/nova/tests/unit/test_cinder.py b/nova/tests/unit/test_cinder.py
index 00e79711ec..e758343549 100644
--- a/nova/tests/unit/test_cinder.py
+++ b/nova/tests/unit/test_cinder.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
from cinderclient.v3 import client as cinder_client_v3
-import mock
from requests_mock.contrib import fixture
import nova.conf
diff --git a/nova/tests/unit/test_conf.py b/nova/tests/unit/test_conf.py
index 95a7c45114..4496922e26 100644
--- a/nova/tests/unit/test_conf.py
+++ b/nova/tests/unit/test_conf.py
@@ -14,8 +14,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
import nova.conf.compute
diff --git a/nova/tests/unit/test_configdrive2.py b/nova/tests/unit/test_configdrive2.py
index 4c0ae0acb4..d04310639b 100644
--- a/nova/tests/unit/test_configdrive2.py
+++ b/nova/tests/unit/test_configdrive2.py
@@ -16,8 +16,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import fileutils
diff --git a/nova/tests/unit/test_context.py b/nova/tests/unit/test_context.py
index cc3d7c7eea..53c8825046 100644
--- a/nova/tests/unit/test_context.py
+++ b/nova/tests/unit/test_context.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@@ -196,7 +197,6 @@ class ContextTestCase(test.NoDBTestCase):
'roles': [],
'service_catalog': [],
'show_deleted': False,
- 'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
diff --git a/nova/tests/unit/test_crypto.py b/nova/tests/unit/test_crypto.py
index 30152b2b01..5cf92af448 100644
--- a/nova/tests/unit/test_crypto.py
+++ b/nova/tests/unit/test_crypto.py
@@ -18,11 +18,11 @@ Tests for Crypto module.
import io
import os
+from unittest import mock
from castellan.common import exception as castellan_exception
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
-import mock
from oslo_concurrency import processutils
from oslo_utils.fixture import uuidsentinel as uuids
import paramiko
diff --git a/nova/tests/unit/test_exception_wrapper.py b/nova/tests/unit/test_exception_wrapper.py
index 56eadf6952..71da124fd9 100644
--- a/nova/tests/unit/test_exception_wrapper.py
+++ b/nova/tests/unit/test_exception_wrapper.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context as nova_context
from nova import exception_wrapper
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
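test_filesystem.py fully constrains the tiny module it covers: reads and writes are rooted at a SYS constant, open() is called with an explicit mode= keyword, and OSError is translated to exception.FileNotFound. A reconstruction consistent with those assertions (a sketch, not necessarily the literal module):

    import os

    from nova import exception

    SYS = '/sys'


    def read_sys(path):
        """Return the contents of a file under SYS."""
        try:
            with open(os.path.join(SYS, path), mode='r') as data:
                return data.read()
        except OSError as exc:
            raise exception.FileNotFound(file_path=path) from exc


    def write_sys(path, data):
        """Write data to a file under SYS."""
        try:
            with open(os.path.join(SYS, path), mode='w') as fd:
                fd.write(data)
        except OSError as exc:
            raise exception.FileNotFound(file_path=path) from exc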
diff --git a/nova/tests/unit/test_fixtures.py b/nova/tests/unit/test_fixtures.py
index 22b278771a..8a5db79855 100644
--- a/nova/tests/unit/test_fixtures.py
+++ b/nova/tests/unit/test_fixtures.py
@@ -17,10 +17,10 @@
import copy
import datetime
import io
+from unittest import mock
import fixtures as fx
import futurist
-import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
@@ -51,6 +51,20 @@ CONF = cfg.CONF
class TestLogging(testtools.TestCase):
def test_default_logging(self):
+ # This test validates that in default logging mode we have two
+ # logging handlers:
+ # 1 x to display default messages (info, error, warnings...)
+ # 1 x to redirect debug messages to null so they are not displayed.
+
+ # However, if OS_DEBUG=True is set in the shell session running the
+ # test, the test fails: in debug mode there is only one handler and
+ # it displays all messages.
+
+ # Here we explicitly set OS_DEBUG=0 to ensure we have two handlers
+ # regardless of the OS_DEBUG value set in the user's shell.
+ self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '0'))
+
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index 03b7692217..41cbada99f 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -13,8 +13,8 @@
# under the License.
import textwrap
+from unittest import mock
-import mock
import pycodestyle
from nova.hacking import checks
@@ -1020,3 +1020,47 @@ class HackingTestCase(test.NoDBTestCase):
nova_utils.ReaderWriterLock()
"""
self._assert_has_no_errors(code, checks.check_lockutils_rwlocks)
+
+ def test_check_six(self):
+ code = """
+ import six
+ from six import moves
+ from six.moves import range
+ import six.moves.urllib.parse as urlparse
+ """
+ errors = [(x + 1, 0, 'N370') for x in range(4)]
+ self._assert_has_errors(code, checks.check_six, expected_errors=errors)
+
+ def test_import_stock_mock(self):
+ self._assert_has_errors(
+ "import mock",
+ checks.import_stock_mock, expected_errors=[(1, 0, 'N371')])
+ self._assert_has_errors(
+ "from mock import patch",
+ checks.import_stock_mock, expected_errors=[(1, 0, 'N371')])
+ code = """
+ from unittest import mock
+ import unittest.mock
+ """
+ self._assert_has_no_errors(code, checks.import_stock_mock)
+
+ def test_check_set_daemon(self):
+ code = """
+ self.setDaemon(True)
+ worker.setDaemon(True)
+ self._event_thread.setDaemon(True)
+ mythread.setDaemon(False)
+ self.thread.setDaemon(1)
+ """
+ errors = [(x + 1, 0, 'N372') for x in range(5)]
+ self._assert_has_errors(
+ code, checks.check_set_daemon, expected_errors=errors)
+
+ code = """
+ self.setDaemon = True
+ worker.setDaemonFlag(True)
+ self._event_thread.resetDaemon(True)
+ self.set.Daemon(True)
+ self.thread.setdaemon(True)
+ """
+ self._assert_has_no_errors(code, checks.check_set_daemon)
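The expected error codes (N371 for stock mock imports, N372 for setDaemon calls) imply two line-oriented hacking checks. A plausible regex-based shape for them, in the style of nova's other checks; the exact patterns and messages are assumptions, not copied from nova/hacking/checks.py:

    import re

    mock_import_re = re.compile(r"^\s*(import\s+mock\b|from\s+mock\b)")
    set_daemon_re = re.compile(r"\.setDaemon\(")


    def import_stock_mock(logical_line):
        """N371: use ``from unittest import mock``, not the mock lib."""
        if mock_import_re.match(logical_line):
            yield (0, "N371: you must explicitly import python's mock: "
                      "``from unittest import mock``")


    def check_set_daemon(logical_line):
        """N372: use the ``daemon`` attribute, not ``setDaemon()``."""
        if set_daemon_re.search(logical_line):
            yield (0, "N372: don't use `thread.setDaemon()`, use "
                      "`thread.daemon = True` instead")

Note that the negative cases above (``setDaemonFlag(``, ``resetDaemon(``, ``setdaemon(``) all fail the ``\.setDaemon\(`` pattern, matching what the test asserts.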
diff --git a/nova/tests/unit/test_identity.py b/nova/tests/unit/test_identity.py
index 099a9182d7..2bb5e7f9c0 100644
--- a/nova/tests/unit/test_identity.py
+++ b/nova/tests/unit/test_identity.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from keystoneauth1.adapter import Adapter
from keystoneauth1 import exceptions as kse
@@ -29,7 +29,7 @@ class IdentityValidationTest(test.NoDBTestCase):
There are times when Nova stores keystone project_id and user_id
in our database as strings. Until the Pike release none of this
- data was validated, so it was very easy for adminstrators to think
+ data was validated, so it was very easy for administrators to think
they were adjusting quota for a project (by name) when instead
they were just inserting keys in a database that would not get used.
diff --git a/nova/tests/unit/test_json_ref.py b/nova/tests/unit/test_json_ref.py
index 5a139055f5..e7cbbc9133 100644
--- a/nova/tests/unit/test_json_ref.py
+++ b/nova/tests/unit/test_json_ref.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
-import mock
+from unittest import mock
from nova import test
from nova.tests import json_ref
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
index 630cb54418..d013aeb651 100644
--- a/nova/tests/unit/test_metadata.py
+++ b/nova/tests/unit/test_metadata.py
@@ -22,10 +22,10 @@ import hmac
import os
import pickle
import re
+from unittest import mock
from keystoneauth1 import exceptions as ks_exceptions
from keystoneauth1 import session
-import mock
from oslo_config import cfg
from oslo_serialization import base64
from oslo_serialization import jsonutils
@@ -1200,7 +1200,7 @@ class MetadataHandlerTestCase(test.TestCase):
def _fake_x_get_metadata(self, self_app, instance_id, remote_address):
if remote_address is None:
- raise Exception('Expected X-Forwared-For header')
+ raise Exception('Expected X-Forwarded-For header')
if encodeutils.to_utf8(instance_id) == self.expected_instance_id:
return self.mdinst
@@ -1458,20 +1458,17 @@ class MetadataHandlerTestCase(test.TestCase):
for c in range(ord('a'), ord('z'))]
mock_client.list_subnets.return_value = {
'subnets': subnet_list}
+ mock_client.list_ports.side_effect = fake_list_ports
- with mock.patch.object(
- mock_client, 'list_ports',
- side_effect=fake_list_ports) as mock_list_ports:
-
- response = fake_request(
- self, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Metadata-Provider': proxy_lb_id})
-
- self.assertEqual(3, mock_list_ports.call_count)
+ response = fake_request(
+ self, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Metadata-Provider': proxy_lb_id})
+
+ self.assertEqual(3, mock_client.list_ports.call_count)
self.assertEqual(200, response.status_int)
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
index 344f62e758..062eeb7f4f 100644
--- a/nova/tests/unit/test_notifications.py
+++ b/nova/tests/unit/test_notifications.py
@@ -17,8 +17,8 @@
import copy
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@@ -112,7 +112,7 @@ class NotificationsTestCase(test.TestCase):
# test config disable of just the task state notifications
self.flags(notify_on_state_change="vm_state", group='notifications')
- # we should not get a notification on task stgate chagne now
+ # we should not get a notification on task state change now
old = copy.copy(self.instance)
self.instance.task_state = task_states.SPAWNING
diff --git a/nova/tests/unit/test_notifier.py b/nova/tests/unit/test_notifier.py
index 95366cdf28..fc01b1cf83 100644
--- a/nova/tests/unit/test_notifier.py
+++ b/nova/tests/unit/test_notifier.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import rpc
from nova import test
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index f6f5c3e64b..752b872381 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -16,8 +16,8 @@
"""Test of Policy Engine For Nova."""
import os.path
+from unittest import mock
-import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import requests_mock
@@ -303,10 +303,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
- self.non_admin_context = context.RequestContext('fake', 'fake',
- roles=['member'])
- self.admin_context = context.RequestContext('fake', 'fake', True,
- roles=['member'])
+ self.non_admin_context = context.RequestContext(
+ 'fake', 'fake', roles=['member', 'reader'])
+ self.admin_context = context.RequestContext(
+ 'fake', 'fake', True, roles=['admin', 'member', 'reader'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
@@ -358,6 +358,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-services:update",
"os_compute_api:os-services:delete",
"os_compute_api:os-shelve:shelve_offload",
+"os_compute_api:os-shelve:unshelve_to_host",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
@@ -366,6 +367,27 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-server-external-events:create",
"os_compute_api:os-volumes-attachments:swap",
"os_compute_api:servers:create:zero_disk_flavor",
+"os_compute_api:os-baremetal-nodes:list",
+"os_compute_api:os-baremetal-nodes:show",
+"os_compute_api:servers:migrations:index",
+"os_compute_api:servers:migrations:show",
+"os_compute_api:os-simple-tenant-usage:list",
+"os_compute_api:os-migrations:index",
+"os_compute_api:os-services:list",
+"os_compute_api:os-instance-actions:events:details",
+"os_compute_api:os-instance-usage-audit-log:list",
+"os_compute_api:os-instance-usage-audit-log:show",
+"os_compute_api:os-hosts:list",
+"os_compute_api:os-hosts:show",
+"os_compute_api:os-hypervisors:list",
+"os_compute_api:os-hypervisors:list-detail",
+"os_compute_api:os-hypervisors:show",
+"os_compute_api:os-hypervisors:statistics",
+"os_compute_api:os-hypervisors:uptime",
+"os_compute_api:os-hypervisors:search",
+"os_compute_api:os-hypervisors:servers",
+"os_compute_api:limits:other_project",
+"os_compute_api:os-flavor-access",
)
self.admin_or_owner_rules = (
@@ -409,6 +431,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:servers:resize",
"os_compute_api:servers:revert_resize",
"os_compute_api:servers:show",
+"os_compute_api:servers:show:flavor-extra-specs",
"os_compute_api:servers:update",
"os_compute_api:servers:create_image:allow_volume_backed",
"os_compute_api:os-admin-password",
@@ -418,7 +441,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete:restore",
"os_compute_api:os-deferred-delete:force",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ips:add",
@@ -454,44 +476,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-volumes-attachments:create",
"os_compute_api:os-volumes-attachments:delete",
"os_compute_api:os-volumes-attachments:update",
-)
-
- self.allow_all_rules = (
-"os_compute_api:os-quota-sets:defaults",
-"os_compute_api:os-availability-zone:list",
-"os_compute_api:limits",
-"os_compute_api:extensions",
-"os_compute_api:os-floating-ip-pools",
-)
-
- self.system_reader_rules = (
-"os_compute_api:os-tenant-networks:list",
-"os_compute_api:os-tenant-networks:show",
-"os_compute_api:os-baremetal-nodes:list",
-"os_compute_api:os-baremetal-nodes:show",
-"os_compute_api:servers:migrations:index",
-"os_compute_api:servers:migrations:show",
-"os_compute_api:os-simple-tenant-usage:list",
-"os_compute_api:os-migrations:index",
-"os_compute_api:os-services:list",
-"os_compute_api:os-instance-actions:events:details",
-"os_compute_api:os-instance-usage-audit-log:list",
-"os_compute_api:os-instance-usage-audit-log:show",
-"os_compute_api:os-hosts:list",
-"os_compute_api:os-hosts:show",
-"os_compute_api:os-hypervisors:list",
-"os_compute_api:os-hypervisors:list-detail",
-"os_compute_api:os-hypervisors:show",
-"os_compute_api:os-hypervisors:statistics",
-"os_compute_api:os-hypervisors:uptime",
-"os_compute_api:os-hypervisors:search",
-"os_compute_api:os-hypervisors:servers",
-"os_compute_api:limits:other_project",
-"os_compute_api:os-networks:list",
-"os_compute_api:os-networks:show",
-)
-
- self.system_reader_or_owner_rules = (
"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-security-groups:get",
"os_compute_api:os-security-groups:show",
@@ -513,6 +497,18 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-volumes:snapshots:show",
"os_compute_api:os-volumes:snapshots:list",
"os_compute_api:os-volumes:snapshots:detail",
+"os_compute_api:os-networks:list",
+"os_compute_api:os-networks:show",
+"os_compute_api:os-tenant-networks:list",
+"os_compute_api:os-tenant-networks:show",
+)
+
+ self.allow_all_rules = (
+"os_compute_api:os-quota-sets:defaults",
+"os_compute_api:os-availability-zone:list",
+"os_compute_api:limits",
+"os_compute_api:extensions",
+"os_compute_api:os-floating-ip-pools",
)
self.allow_nobody_rules = (
@@ -557,13 +553,11 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
# admin_only, non_admin, admin_or_user, empty_rule
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show',
- 'system_admin_api', 'system_reader_api',
'project_admin_api', 'project_member_api',
- 'project_reader_api', 'system_admin_or_owner',
- 'system_or_project_reader')
+ 'project_reader_api', 'project_member_or_admin',
+ 'project_reader_or_admin')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules +
- self.allow_all_rules + self.system_reader_rules +
- self.system_reader_or_owner_rules +
+ self.allow_all_rules +
self.allow_nobody_rules + special_rules)
self.assertEqual(set([]), result)
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
index 312449b13a..7979d83e91 100644
--- a/nova/tests/unit/test_quota.py
+++ b/nova/tests/unit/test_quota.py
@@ -14,15 +14,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_db.sqlalchemy import enginefacade
+from oslo_limit import fixture as limit_fixture
+from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import api as compute
import nova.conf
from nova import context
from nova.db.main import models
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
from nova import objects
from nova import quota
from nova import test
@@ -57,6 +62,7 @@ class QuotaIntegrationTestCase(test.TestCase):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(instances=2,
cores=4,
+ ram=16384,
group='quota')
self.user_id = 'admin'
@@ -97,7 +103,7 @@ class QuotaIntegrationTestCase(test.TestCase):
# _instances_cores_ram_count().
inst_map = objects.InstanceMapping(
self.context, instance_uuid=inst.uuid, project_id=inst.project_id,
- cell_mapping=cell1)
+ user_id=inst.user_id, cell_mapping=cell1)
inst_map.create()
return inst
@@ -109,15 +115,15 @@ class QuotaIntegrationTestCase(test.TestCase):
self.compute_api.create(
self.context, min_count=1, max_count=1,
flavor=self.flavor, image_href=image_uuid)
- except exception.QuotaError as e:
+ except exception.OverQuota as e:
expected_kwargs = {'code': 413,
- 'req': '1, 1',
- 'used': '8, 2',
- 'allowed': '4, 2',
- 'overs': 'cores, instances'}
+ 'req': '1, 1, 2048',
+ 'used': '8, 2, 16384',
+ 'allowed': '4, 2, 16384',
+ 'overs': 'cores, instances, ram'}
self.assertEqual(expected_kwargs, e.kwargs)
else:
- self.fail('Expected QuotaError exception')
+ self.fail('Expected OverQuota exception')
def test_too_many_cores(self):
self._create_instance()
@@ -126,7 +132,7 @@ class QuotaIntegrationTestCase(test.TestCase):
self.compute_api.create(
self.context, min_count=1, max_count=1, flavor=self.flavor,
image_href=image_uuid)
- except exception.QuotaError as e:
+ except exception.OverQuota as e:
expected_kwargs = {'code': 413,
'req': '1',
'used': '4',
@@ -134,7 +140,7 @@ class QuotaIntegrationTestCase(test.TestCase):
'overs': 'cores'}
self.assertEqual(expected_kwargs, e.kwargs)
else:
- self.fail('Expected QuotaError exception')
+ self.fail('Expected OverQuota exception')
def test_many_cores_with_unlimited_quota(self):
# Setting cores quota to unlimited:
@@ -150,7 +156,7 @@ class QuotaIntegrationTestCase(test.TestCase):
metadata['key%s' % i] = 'value%s' % i
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(
- exception.QuotaError, self.compute_api.create,
+ exception.OverQuota, self.compute_api.create,
self.context, min_count=1, max_count=1, flavor=self.flavor,
image_href=image_uuid, metadata=metadata)
@@ -170,41 +176,127 @@ class QuotaIntegrationTestCase(test.TestCase):
files = []
for i in range(CONF.quota.injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
- self._create_with_injected_files(files) # no QuotaError
+ self._create_with_injected_files(files) # no OverQuota
def test_too_many_injected_files(self):
files = []
for i in range(CONF.quota.injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
- self.assertRaises(exception.QuotaError,
+ self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = CONF.quota.injected_file_content_bytes
content = ''.join(['a' for i in range(max)])
files = [('/test/path', content)]
- self._create_with_injected_files(files) # no QuotaError
+ self._create_with_injected_files(files) # no OverQuota
def test_too_many_injected_file_content_bytes(self):
max = CONF.quota.injected_file_content_bytes
content = ''.join(['a' for i in range(max + 1)])
files = [('/test/path', content)]
- self.assertRaises(exception.QuotaError,
+ self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = CONF.quota.injected_file_path_length
path = ''.join(['a' for i in range(max)])
files = [(path, 'config = quotatest')]
- self._create_with_injected_files(files) # no QuotaError
+ self._create_with_injected_files(files) # no OverQuota
def test_too_many_injected_file_path_bytes(self):
max = CONF.quota.injected_file_path_length
path = ''.join(['a' for i in range(max + 1)])
files = [(path, 'config = quotatest')]
- self.assertRaises(exception.QuotaError,
+ self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
+ def _test_with_server_group_members(self):
+ # use a known image uuid to avoid ImageNotFound errors
+ image_uuid = nova_fixtures.GlanceFixture.image4['id']
+
+ instance_group = objects.InstanceGroup(self.context,
+ policy="anti-affinity")
+ instance_group.name = "foo"
+ instance_group.project_id = self.context.project_id
+ instance_group.user_id = self.context.user_id
+ instance_group.uuid = uuids.instance_group
+ instance_group.create()
+
+ self.addCleanup(instance_group.destroy)
+
+ self.compute_api.create(
+ self.context, flavor=self.flavor,
+ image_href=image_uuid,
+ scheduler_hints={'group': uuids.instance_group},
+ check_server_group_quota=True)
+
+ exc = self.assertRaises(exception.OverQuota, self.compute_api.create,
+ self.context,
+ flavor=self.flavor,
+ image_href=image_uuid,
+ scheduler_hints={
+ 'group': uuids.instance_group},
+ check_server_group_quota=True)
+ return exc
+
+ def test_with_server_group_members(self):
+ self.flags(server_group_members=1, group="quota")
+ exc = self._test_with_server_group_members()
+ self.assertEqual("Quota exceeded, too many servers in group", str(exc))
+
+
+class UnifiedLimitsIntegrationTestCase(QuotaIntegrationTestCase):
+ """Test that API and DB resources enforce properly with unified limits.
+
+ Note: coverage for instances, cores, ram, and disk is located under
+ nova/tests/functional/. We don't attempt to test it here as the
+ PlacementFixture is needed to provide resource usages and it is only
+ available in the functional tests environment.
+
+ Note that any test that will succeed in creating a server also needs to be
+ able to use the PlacementFixture as cores, ram, and disk quota are enforced
+ while booting a server. These tests are also located under
+ nova/tests/functional/.
+ """
+
+ def setUp(self):
+ super(UnifiedLimitsIntegrationTestCase, self).setUp()
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 10,
+ local_limit.SERVER_GROUP_MEMBERS: 10,
+ 'servers': 10,
+ 'class:VCPU': 20,
+ 'class:MEMORY_MB': 50 * 1024,
+ 'class:DISK_GB': 100}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ def test_too_many_instances(self):
+ pass
+
+ def test_too_many_cores(self):
+ pass
+
+ def test_no_injected_files(self):
+ pass
+
+ def test_max_injected_files(self):
+ pass
+
+ def test_max_injected_file_content_bytes(self):
+ pass
+
+ def test_max_injected_file_path_bytes(self):
+ pass
+
+ def test_with_server_group_members(self):
+ pass
+
@enginefacade.transaction_context_provider
class FakeContext(context.RequestContext):
@@ -340,6 +432,19 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj = quota.QuotaEngine(quota_driver=FakeDriver)
self.assertEqual(quota_obj._driver, FakeDriver)
+ def test_init_with_flag_set(self):
+ quota_obj = quota.QuotaEngine()
+ self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
+
+ self.flags(group="quota", driver="nova.quota.NoopQuotaDriver")
+ self.assertIsInstance(quota_obj._driver, quota.NoopQuotaDriver)
+
+ self.flags(group="quota", driver="nova.quota.UnifiedLimitsDriver")
+ self.assertIsInstance(quota_obj._driver, quota.UnifiedLimitsDriver)
+
+ self.flags(group="quota", driver="nova.quota.DbQuotaDriver")
+ self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
+
def _get_quota_engine(self, driver, resources=None):
resources = resources or [
quota.AbsoluteResource('test_resource4'),
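The new test_init_with_flag_set above can only pass if QuotaEngine resolves its driver lazily at access time rather than caching it in __init__, since the flags are changed after the engine is built. Presumably _driver is a property along these lines; a sketch with an assumed oslo.utils import_object lookup, not the real implementation:

    from oslo_utils import importutils

    @property
    def _driver(self):
        # an explicit quota_driver= constructor override always wins
        if self.__driver_override:
            return self.__driver_override
        # otherwise resolve from config on access so that changes to
        # [quota]/driver are picked up
        return importutils.import_object(CONF.quota.driver)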
@@ -1871,6 +1976,133 @@ class NoopQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.expected_settable_quotas, result)
+class UnifiedLimitsDriverTestCase(NoopQuotaDriverTestCase):
+ def setUp(self):
+ super(UnifiedLimitsDriverTestCase, self).setUp()
+ self.driver = quota.UnifiedLimitsDriver()
+ # Set these so all limits get a different value, while still testing
+ # as much as possible with the default config
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ self.expected_without_dict = {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 0,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ self.expected_without_usages = {
+ 'cores': {'limit': 2},
+ 'fixed_ips': {'limit': -1},
+ 'floating_ips': {'limit': -1},
+ 'injected_file_content_bytes': {'limit': 10240},
+ 'injected_file_path_bytes': {'limit': 255},
+ 'injected_files': {'limit': 5},
+ 'instances': {'limit': 1},
+ 'key_pairs': {'limit': 100},
+ 'metadata_items': {'limit': 128},
+ 'ram': {'limit': 3},
+ 'security_group_rules': {'limit': -1},
+ 'security_groups': {'limit': -1},
+ 'server_group_members': {'limit': 10},
+ 'server_groups': {'limit': 12}
+ }
+ self.expected_with_usages = {
+ 'cores': {'in_use': 5, 'limit': 2},
+ 'fixed_ips': {'in_use': 0, 'limit': -1},
+ 'floating_ips': {'in_use': 0, 'limit': -1},
+ 'injected_file_content_bytes': {'in_use': 0, 'limit': 10240},
+ 'injected_file_path_bytes': {'in_use': 0, 'limit': 255},
+ 'injected_files': {'in_use': 0, 'limit': 5},
+ 'instances': {'in_use': 4, 'limit': 1},
+ 'key_pairs': {'in_use': 0, 'limit': 100},
+ 'metadata_items': {'in_use': 0, 'limit': 128},
+ 'ram': {'in_use': 6, 'limit': 3},
+ 'security_group_rules': {'in_use': 0, 'limit': -1},
+ 'security_groups': {'in_use': 0, 'limit': -1},
+ 'server_group_members': {'in_use': 0, 'limit': 10},
+ 'server_groups': {'in_use': 9, 'limit': 12}
+ }
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_get_defaults(self, mock_default):
+ # zero for ram simulates no registered limit for ram
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 0}
+ result = self.driver.get_defaults(None, quota.QUOTAS._resources)
+ self.assertEqual(self.expected_without_dict, result)
+ mock_default.assert_called_once_with()
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_get_class_quotas(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 0}
+ result = self.driver.get_class_quotas(
+ None, quota.QUOTAS._resources, 'test_class')
+ self.assertEqual(self.expected_without_dict, result)
+ mock_default.assert_called_once_with()
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_project_quotas(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ result = self.driver.get_project_quotas(
+ None, quota.QUOTAS._resources, 'test_project')
+ self.assertEqual(self.expected_with_usages, result)
+ mock_count.assert_called_once_with(None, "test_project")
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_project_quotas_no_usages(self, mock_count, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ result = self.driver.get_project_quotas(
+ None, quota.QUOTAS._resources, 'test_project', usages=False)
+ self.assertEqual(self.expected_without_usages, result)
+ # ensure usages not fetched when not required
+ self.assertEqual(0, mock_count.call_count)
+ mock_proj.assert_called_once_with("test_project")
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_user_quotas(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ result = self.driver.get_user_quotas(
+ None, quota.QUOTAS._resources, 'test_project', 'fake_user')
+ self.assertEqual(self.expected_with_usages, result)
+ mock_count.assert_called_once_with(None, "test_project")
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_user_quotas_no_usages(self, mock_count, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ result = self.driver.get_user_quotas(
+ None, quota.QUOTAS._resources, 'test_project', 'fake_user',
+ usages=False)
+ self.assertEqual(self.expected_without_usages, result)
+ # ensure usages not fetched when not required
+ self.assertEqual(0, mock_count.call_count)
+
+
@ddt.ddt
class QuotaCountTestCase(test.NoDBTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
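
The quota hunk above stubs the placement-limit helpers (get_legacy_default_limits, get_legacy_project_limits, get_legacy_counts) and asserts the merged {'limit': ..., 'in_use': ...} shape. A minimal sketch of that merge, using only the dict shapes visible in the tests; merge_limits_and_counts is a hypothetical illustration, not the driver's code:

    # Hypothetical helper mirroring how per-resource limits and usage counts
    # combine into the {'limit': x, 'in_use': y} entries asserted above.
    def merge_limits_and_counts(limits, counts):
        return {
            resource: {'limit': limit, 'in_use': counts.get(resource, 0)}
            for resource, limit in limits.items()
        }

    limits = {"instances": 1, "cores": 2, "ram": 3}
    counts = {"instances": 4, "cores": 5, "ram": 6}
    assert merge_limits_and_counts(limits, counts)["ram"] == {
        'limit': 3, 'in_use': 6}
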
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index eece75af96..40a914b5f7 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
@@ -213,20 +214,20 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client(self, mock_get, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -252,21 +253,21 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client_profiler_enabled(self, mock_client, mock_ser,
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -431,11 +432,11 @@ class TestProfilerRequestContextSerializer(test.NoDBTestCase):
class TestClientRouter(test.NoDBTestCase):
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
@@ -443,7 +444,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
# verify a client was created by ClientRouter
- mock_rpcclient.assert_called_once_with(
+ mock_get.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
@@ -451,11 +452,11 @@ class TestClientRouter(test.NoDBTestCase):
# verify cell client was returned
self.assertEqual(cell_client, client)
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance_untargeted(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance_untargeted(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
@@ -463,7 +464,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
- self.assertFalse(mock_rpcclient.called)
+ self.assertFalse(mock_get.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
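
The test_rpc hunks above track oslo.messaging's move from constructing RPCClient directly to the get_rpc_client() factory; the argument list is unchanged. A short sketch of the new call, with placeholder transport/target/serializer objects:

    # Sketch of the factory call the updated assertions expect; transport,
    # target and serializer are placeholders for the real objects.
    import oslo_messaging as messaging

    def build_client(transport, target, serializer):
        return messaging.get_rpc_client(
            transport, target,
            version_cap='1.0',
            call_monitor_timeout=None,
            serializer=serializer)
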
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index b5721696db..acc1aeca7f 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -18,7 +18,8 @@
Unit Tests for remote procedure calls using queue
"""
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_service import service as _service
@@ -127,7 +128,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
# init_host is called before any service record is created
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host, self.binary)
mock_create.assert_called_once_with()
@@ -157,7 +158,7 @@ class ServiceTestCase(test.NoDBTestCase):
service_obj = mock.Mock()
service_obj.binary = 'fake-binary'
service_obj.host = 'fake-host'
- service_obj.version = -42
+ service_obj.version = 42
mock_get_by_host_and_binary.return_value = service_obj
serv = service.Service(self.host, self.binary, self.topic,
@@ -185,7 +186,7 @@ class ServiceTestCase(test.NoDBTestCase):
mock_create.side_effect = ex
serv.manager = mock_manager
self.assertRaises(test.TestingException, serv.start)
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(None)
mock_get_by_host_and_binary.assert_has_calls([
mock.call(mock.ANY, self.host, self.binary),
mock.call(mock.ANY, self.host, self.binary)])
@@ -215,7 +216,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host,
self.binary)
@@ -240,7 +241,8 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(
+ mock_svc_get_by_host_and_binary.return_value)
serv.stop()
serv.manager.cleanup_host.assert_called_with()
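
The test_service updates above pin a new init_host() contract: the manager now receives the pre-existing service record, or None on first start before any record is created. A hedged sketch of the manager-side signature implied by those assertions (FakeManager is illustrative, not nova's manager base class):

    class FakeManager:
        def init_host(self, service_ref):
            if service_ref is None:
                # first start: the service record has not been created yet
                return
            # restart: service_ref carries the persisted service state
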
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index db2a2e2899..5f07515188 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
-import mock
from nova import context
from nova import service_auth
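
The import churn in this and the following files is one mechanical migration: the third-party mock package is replaced by the stdlib unittest.mock, which also moves the import into the standard-library group. The resulting ordering, using imports taken from the hunks themselves:

    # stdlib group
    from unittest import mock

    # third-party group
    from keystoneauth1 import loading as ks_loading

    # project group
    from nova import service_auth
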
diff --git a/nova/tests/unit/test_test.py b/nova/tests/unit/test_test.py
index 8381792de6..1042153b10 100644
--- a/nova/tests/unit/test_test.py
+++ b/nova/tests/unit/test_test.py
@@ -18,9 +18,9 @@
import os.path
import tempfile
+from unittest import mock
import uuid
-import mock
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -361,21 +361,6 @@ class PatchExistsTestCase(test.NoDBTestCase):
self.assertTrue(os.path.exists(os.path.dirname(__file__)))
self.assertFalse(os.path.exists('non-existent/file'))
- @test.patch_exists('fake_file1', True)
- @test.patch_exists('fake_file2', True)
- @test.patch_exists(__file__, False)
- def test_patch_exists_multiple_decorators(self):
- """Test that @patch_exists can be used multiple times on the
- same method.
- """
- self.assertTrue(os.path.exists('fake_file1'))
- self.assertTrue(os.path.exists('fake_file2'))
- self.assertFalse(os.path.exists(__file__))
-
- # Check non-patched parameters
- self.assertTrue(os.path.exists(os.path.dirname(__file__)))
- self.assertFalse(os.path.exists('non-existent/file'))
-
class PatchOpenTestCase(test.NoDBTestCase):
fake_contents = "These file contents don't really exist"
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
index bd69ccbb65..ca4e09b087 100644
--- a/nova/tests/unit/test_utils.py
+++ b/nova/tests/unit/test_utils.py
@@ -16,13 +16,13 @@ import datetime
import os
import os.path
import tempfile
+from unittest import mock
import eventlet
import fixtures
from keystoneauth1 import adapter as ks_adapter
from keystoneauth1.identity import base as ks_identity
from keystoneauth1 import session as ks_session
-import mock
import netaddr
from openstack import exceptions as sdk_exc
from oslo_config import cfg
diff --git a/nova/tests/unit/test_weights.py b/nova/tests/unit/test_weights.py
index 5758e9aa2f..ad0a203ff4 100644
--- a/nova/tests/unit/test_weights.py
+++ b/nova/tests/unit/test_weights.py
@@ -16,7 +16,7 @@
Tests For weights.
"""
-import mock
+from unittest import mock
from nova.scheduler import weights as scheduler_weights
from nova.scheduler.weights import ram
diff --git a/nova/tests/unit/test_wsgi.py b/nova/tests/unit/test_wsgi.py
index e46318cd17..45a0406b5c 100644
--- a/nova/tests/unit/test_wsgi.py
+++ b/nova/tests/unit/test_wsgi.py
@@ -19,10 +19,10 @@
import os.path
import socket
import tempfile
+from unittest import mock
import eventlet
import eventlet.wsgi
-import mock
from oslo_config import cfg
import requests
import testtools
diff --git a/nova/tests/unit/utils.py b/nova/tests/unit/utils.py
index 6311475522..51edc45686 100644
--- a/nova/tests/unit/utils.py
+++ b/nova/tests/unit/utils.py
@@ -17,8 +17,7 @@ import errno
import platform
import socket
import sys
-
-import mock
+from unittest import mock
from nova.compute import flavors
import nova.conf
diff --git a/nova/tests/unit/virt/disk/mount/test_api.py b/nova/tests/unit/virt/disk/mount/test_api.py
index d2d040dd84..7d8a741914 100644
--- a/nova/tests/unit/virt/disk/mount/test_api.py
+++ b/nova/tests/unit/virt/disk/mount/test_api.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_service import fixture as service_fixture
from nova import test
@@ -202,7 +203,7 @@ class MountTestCase(test.NoDBTestCase):
device)
self.assertIsInstance(inst, block.BlockMount)
- def test_instance_for_device_block_partiton(self,):
+ def test_instance_for_device_block_partiton(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
diff --git a/nova/tests/unit/virt/disk/mount/test_loop.py b/nova/tests/unit/virt/disk/mount/test_loop.py
index 3c0c18fa60..312b88db35 100644
--- a/nova/tests/unit/virt/disk/mount/test_loop.py
+++ b/nova/tests/unit/virt/disk/mount/test_loop.py
@@ -14,8 +14,9 @@
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import test
from nova.virt.disk.mount import loop
diff --git a/nova/tests/unit/virt/disk/mount/test_nbd.py b/nova/tests/unit/virt/disk/mount/test_nbd.py
index 0024b2f6d6..cc0e04337e 100644
--- a/nova/tests/unit/virt/disk/mount/test_nbd.py
+++ b/nova/tests/unit/virt/disk/mount/test_nbd.py
@@ -14,10 +14,10 @@
# under the License.
-import mock
import os
import tempfile
import time
+from unittest import mock
import eventlet
import fixtures
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 5b90fd186e..135558e145 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -14,8 +14,8 @@
# under the License.
import tempfile
+from unittest import mock
-import mock
from oslo_concurrency import processutils
from oslo_utils import units
@@ -40,6 +40,7 @@ class FakeMount(object):
class APITestCase(test.NoDBTestCase):
+ @mock.patch('nova.virt.disk.vfs.guestfs.VFSGuestFS', new=mock.Mock())
def test_can_resize_need_fs_type_specified(self):
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
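
One detail in the disk API hunk above: passing new=mock.Mock() to mock.patch replaces the target without injecting an extra mock argument, so the decorated test method's signature stays unchanged. A tiny standalone illustration of the same pattern (os.path.exists is only a convenient stand-in target):

    import os.path
    from unittest import mock

    # new= suppresses the injected mock argument entirely
    @mock.patch('os.path.exists', new=mock.Mock(return_value=True))
    def check():
        return os.path.exists('/definitely/not/there')

    assert check() is True
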
diff --git a/nova/tests/unit/virt/disk/vfs/test_guestfs.py b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
index b1c619c955..9dc937202a 100644
--- a/nova/tests/unit/virt/disk/vfs/test_guestfs.py
+++ b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
import fixtures
-import mock
from nova import exception
from nova import test
diff --git a/nova/tests/unit/virt/hyperv/__init__.py b/nova/tests/unit/virt/hyperv/__init__.py
index e69de29bb2..2190f0570f 100644
--- a/nova/tests/unit/virt/hyperv/__init__.py
+++ b/nova/tests/unit/virt/hyperv/__init__.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+try:
+ import os_win # noqa: F401
+except ImportError:
+ raise unittest.SkipTest(
+ "The 'os-win' dependency is not installed."
+ )
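
Raising unittest.SkipTest at import time, as the new hyperv __init__.py does, should make unittest discovery record the package's test modules as skipped rather than erroring when the optional dependency is absent. The general form of the guard, with optional_dep as a placeholder for any soft dependency:

    import unittest

    try:
        import optional_dep  # noqa: F401
    except ImportError:
        raise unittest.SkipTest(
            "The 'optional_dep' dependency is not installed."
        )
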
diff --git a/nova/tests/unit/virt/hyperv/test_base.py b/nova/tests/unit/virt/hyperv/test_base.py
index e895fc600e..1dd7db367b 100644
--- a/nova/tests/unit/virt/hyperv/test_base.py
+++ b/nova/tests/unit/virt/hyperv/test_base.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from os_win import utilsfactory
from nova import test
diff --git a/nova/tests/unit/virt/hyperv/test_block_device_manager.py b/nova/tests/unit/virt/hyperv/test_block_device_manager.py
index ded2ffa0d4..0d914a55a5 100644
--- a/nova/tests/unit/virt/hyperv/test_block_device_manager.py
+++ b/nova/tests/unit/virt/hyperv/test_block_device_manager.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import constants as os_win_const
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py
index 07f251390e..c9ccc6e8f1 100644
--- a/nova/tests/unit/virt/hyperv/test_driver.py
+++ b/nova/tests/unit/virt/hyperv/test_driver.py
@@ -19,8 +19,8 @@ Unit tests for the Hyper-V Driver.
import platform
import sys
+from unittest import mock
-import mock
from os_win import exceptions as os_win_exc
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_eventhandler.py b/nova/tests/unit/virt/hyperv/test_eventhandler.py
index 658a49c5c1..9825bc9141 100644
--- a/nova/tests/unit/virt/hyperv/test_eventhandler.py
+++ b/nova/tests/unit/virt/hyperv/test_eventhandler.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
+from unittest import mock
from nova.tests.unit.virt.hyperv import test_base
from nova import utils
diff --git a/nova/tests/unit/virt/hyperv/test_hostops.py b/nova/tests/unit/virt/hyperv/test_hostops.py
index ebe2979f8a..04434dd37e 100644
--- a/nova/tests/unit/virt/hyperv/test_hostops.py
+++ b/nova/tests/unit/virt/hyperv/test_hostops.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
import os_resource_classes as orc
from os_win import constants as os_win_const
from oslo_config import cfg
diff --git a/nova/tests/unit/virt/hyperv/test_imagecache.py b/nova/tests/unit/virt/hyperv/test_imagecache.py
index 4c0c1318ae..827d52133d 100644
--- a/nova/tests/unit/virt/hyperv/test_imagecache.py
+++ b/nova/tests/unit/virt/hyperv/test_imagecache.py
@@ -14,10 +14,10 @@
# under the License.
import os
+from unittest import mock
import ddt
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/virt/hyperv/test_livemigrationops.py b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
index 8a3df843b9..79cb4318c5 100644
--- a/nova/tests/unit/virt/hyperv/test_livemigrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import exceptions as os_win_exc
+from unittest import mock
+
from oslo_config import cfg
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py
index 86844b11cf..d0b7ff32fd 100644
--- a/nova/tests/unit/virt/hyperv/test_migrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_migrationops.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from os_win import exceptions as os_win_exc
from oslo_utils import units
diff --git a/nova/tests/unit/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py
index 573fe557a5..7bd9e91e3f 100644
--- a/nova/tests/unit/virt/hyperv/test_pathutils.py
+++ b/nova/tests/unit/virt/hyperv/test_pathutils.py
@@ -14,8 +14,7 @@
import os
import time
-
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
index ffc1e4cd0c..5e6bf9a3c3 100644
--- a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
@@ -17,7 +17,7 @@
Unit tests for the Hyper-V RDPConsoleOps.
"""
-import mock
+from unittest import mock
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import rdpconsoleops
diff --git a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
index 4240b8eb95..e9461408c4 100644
--- a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
+++ b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
index 1e8a9c7557..4a4b7c8e4f 100644
--- a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
+++ b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_serialproxy.py b/nova/tests/unit/virt/hyperv/test_serialproxy.py
index 4d1cf80f80..b7e08a67dd 100644
--- a/nova/tests/unit/virt/hyperv/test_serialproxy.py
+++ b/nova/tests/unit/virt/hyperv/test_serialproxy.py
@@ -14,8 +14,8 @@
# under the License.
import socket
+from unittest import mock
-import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_snapshotops.py b/nova/tests/unit/virt/hyperv/test_snapshotops.py
index 60f5876296..1bb2f8dd4b 100644
--- a/nova/tests/unit/virt/hyperv/test_snapshotops.py
+++ b/nova/tests/unit/virt/hyperv/test_snapshotops.py
@@ -14,8 +14,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova.compute import task_states
from nova.tests.unit import fake_instance
diff --git a/nova/tests/unit/virt/hyperv/test_vif.py b/nova/tests/unit/virt/hyperv/test_vif.py
index c1f5951b79..d4c8d7af58 100644
--- a/nova/tests/unit/virt/hyperv/test_vif.py
+++ b/nova/tests/unit/virt/hyperv/test_vif.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.conf
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index dd4dc52d5b..07e1774f9a 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -13,10 +13,10 @@
# under the License.
import os
+from unittest import mock
import ddt
from eventlet import timeout as etimeout
-import mock
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
@@ -1374,12 +1374,10 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_get_vm_state(self):
summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}
- with mock.patch.object(self._vmops._vmutils,
- 'get_vm_summary_info') as mock_get_summary_info:
- mock_get_summary_info.return_value = summary_info
+ self._vmops._vmutils.get_vm_summary_info.return_value = summary_info
- response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
- self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
@@ -1418,12 +1416,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
- with mock.patch.object(self._vmops._vmutils,
- 'list_instance_notes') as mock_list_notes:
- mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+ self._vmops._vmutils.list_instance_notes.return_value = (
+ [('fake_name', [fake_uuid])])
- response = self._vmops.list_instance_uuids()
- mock_list_notes.assert_called_once_with()
+ response = self._vmops.list_instance_uuids()
+ self._vmops._vmutils.list_instance_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])
@@ -1830,7 +1827,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self.assertEqual(fake_local_disks, ret_val)
def test_get_scoped_flavor_extra_specs(self):
- # The flavor extra spect dict contains only string values.
+ # The flavor extra specs dict contains only string values.
fake_total_bytes_sec = '8'
mock_instance = fake_instance.fake_instance_obj(self.context)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index da7262085d..66d2c2527f 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_utils import units
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
index 9c2ffe3dca..512f1438d6 100644
--- a/nova/tests/unit/virt/ironic/test_client_wrapper.py
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -13,11 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from ironicclient import client as ironic_client
from ironicclient import exc as ironic_exception
from keystoneauth1 import discover as ksa_disc
import keystoneauth1.session
-import mock
from oslo_config import cfg
import nova.conf
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 7b377b21c2..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -15,9 +15,10 @@
"""Tests for the ironic driver."""
+from unittest import mock
+
import fixtures
from ironicclient import exc as ironic_exception
-import mock
from openstack import exceptions as sdk_exc
from oslo_config import cfg
from oslo_service import loopingcall
@@ -934,6 +935,48 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected = {
'CUSTOM_IRON_NFV': {
'total': 1,
+ 'reserved': 1,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ mock_nfc.assert_called_once_with(mock.sentinel.nodename)
+ mock_nr.assert_called_once_with(mock_nfc.return_value)
+ mock_res_used.assert_called_once_with(mock_nfc.return_value)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
+ result = self.ptree.data(mock.sentinel.nodename).inventory
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_used', return_value=True)
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable', return_value=False)
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
+ def test_update_provider_tree_with_rc_occupied_workaround(self,
+ mock_nfc, mock_nr, mock_res_unavail, mock_res_used):
+ """Ensure that when a node is used, we report the inventory matching
+ the consumed resources.
+ """
+ self.flags(skip_reserve_in_use_ironic_nodes=True,
+ group="workarounds")
+ mock_nr.return_value = {
+ 'vcpus': 24,
+ 'vcpus_used': 24,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 1024,
+ 'local_gb': 100,
+ 'local_gb_used': 100,
+ 'resource_class': 'iron-nfv',
+ }
+
+ self.driver.update_provider_tree(self.ptree, mock.sentinel.nodename)
+
+ expected = {
+ 'CUSTOM_IRON_NFV': {
+ 'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
@@ -944,7 +987,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).inventory
self.assertEqual(expected, result)
@@ -1015,7 +1058,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(), result)
@@ -1047,7 +1090,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(traits), result)
@@ -2499,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2531,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2539,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
@@ -2597,9 +2658,6 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
# that the thread completes.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
- self.mock_conn = self.useFixture(
- fixtures.MockPatchObject(self.driver, '_ironic_connection')).mock
-
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
def test_rescue(self, mock_sps, mock_looping):
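
Two behaviors are pinned by the new ironic-driver tests above: with [workarounds]skip_reserve_in_use_ironic_nodes an occupied node keeps reserved=0 in its inventory instead of reserving the whole node, and prepare_for_spawn now refuses nodes whose provision state is not AVAILABLE. A hedged sketch of the reservation decision those inventory values encode (an illustration of the expected values, not the driver's code):

    # Expected 'reserved' value for a single-unit bare-metal resource class,
    # per the two update_provider_tree tests above.
    def reserved_units(node_in_use, skip_reserve_workaround):
        if node_in_use and not skip_reserve_workaround:
            return 1  # reserve the whole node while it is occupied
        return 0      # workaround keeps in-use nodes unreserved

    assert reserved_units(True, False) == 1
    assert reserved_units(True, True) == 0
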
diff --git a/nova/tests/unit/virt/powervm/tasks/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/unit/virt/powervm/tasks/__init__.py
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..b5bcb762f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ # Create a fake instance with two pinned CPUs but only one is on the
+ # dedicated set
+ numa_topology = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(cpu_pinning_raw={'0': '0', '2': '2'}),
+ ])
+ self.fake_inst = objects.Instance(numa_topology=numa_topology)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_online(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).online calls set_online(i)
+ mock_online.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_up_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_high_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'performance')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped(self, mock_online):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_up(self.fake_inst)
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped_if_standard_instance(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_up(objects.Instance(numa_topology=None))
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).online = False calls set_offline(i)
+ mock_offline.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_low_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'powersave')
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down(self.fake_inst)
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped_if_standard_instance(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_down(objects.Instance(numa_topology=None))
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_offline.assert_has_calls([mock.call(0), mock.call(1)])
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_all_dedicated_cpus_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs get their governor set to powersave
+ mock_set_governor.assert_has_calls([mock.call(0, 'powersave'),
+ mock.call(1, 'powersave')])
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down_all_dedicated_cpus()
+ mock_offline.assert_not_called()
+
+ def test_power_down_all_dedicated_cpus_wrong_config(self):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set=None, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.power_down_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_governor(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ mock_get_governor.return_value = 'performance'
+ mock_get_online.side_effect = (True, False)
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_cpu_state(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ mock_get_online.return_value = True
+ mock_get_governor.side_effect = ('powersave', 'performance')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
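
The CPU power-management API tests above all encode one filtering rule: power_up/power_down only touch pinned CPUs that also fall inside [compute]cpu_dedicated_set, and do nothing when cpu_power_management is off or the instance has no NUMA topology. The rule in isolation (names are illustrative):

    # Only the intersection of the instance's pinned CPUs and the host's
    # dedicated set is power-managed; core #2 is ignored in the tests above.
    def cores_to_power_manage(pinned_cpus, dedicated_set):
        return sorted(pinned_cpus & dedicated_set)

    assert cores_to_power_manage({0, 2}, {0, 1}) == [0]
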
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
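
The core helpers exercised above wrap two sysfs knobs per CPU: the online state under cpu<N>/online (written as '1'/'0') and the frequency governor under cpu<N>/cpufreq/scaling_governor. A small sketch of the path layout, using the standard /sys/devices/system/cpu prefix (the tests substitute a fake '/fakesys/blah' prefix):

    SYS_CPU = '/sys/devices/system/cpu'

    def online_path(ident):
        return '%s/cpu%d/online' % (SYS_CPU, ident)

    def governor_path(ident):
        return '%s/cpu%d/cpufreq/scaling_governor' % (SYS_CPU, ident)

    assert online_path(1) == '/sys/devices/system/cpu/cpu1/online'
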
diff --git a/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py b/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
index cd45bac54a..28c93e4855 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
@@ -14,7 +14,8 @@
# under the License.
import binascii
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/storage/test_lvm.py b/nova/tests/unit/virt/libvirt/storage/test_lvm.py
index fbec2dcae9..04d9ffdcbf 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_lvm.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_lvm.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index 7707f745e3..5a0dbb40ce 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -14,9 +14,9 @@
# under the License.
import copy
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
@@ -74,6 +74,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
+ image = [{'device_type': 'disk', 'boot_index': 0}]
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
@@ -84,6 +85,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_path': 'fake_device'}]
return {'root_device_name': '/dev/vda',
'swap': swap if with_swap else {},
+ 'image': image,
'ephemerals': ephemerals if with_eph else [],
'block_device_mapping':
block_device_mapping if with_bdms else []}
@@ -178,11 +180,16 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
with mock.patch.object(instance_ref, 'get_flavor',
return_value=instance_ref.flavor) as get_flavor:
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Since there was no block_device_info passed to get_disk_mapping we
# expect to get the swap info from the flavor in the instance.
get_flavor.assert_called_once_with()
@@ -202,7 +209,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
- 'root_device_name': '/dev/sda'
+ 'root_device_name': '/dev/sda',
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
@@ -490,9 +498,12 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
- "lxc", "lxc",
- image_meta)
+ block_device_info = {
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "lxc", instance_ref, "lxc", "lxc", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'lxc', 'dev': None,
'type': 'disk', 'boot_index': '1'},
@@ -527,9 +538,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.flavor.swap = 5
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -549,6 +565,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.ephemeral_gb = 0
block_dev_info = {'swap': None, 'root_device_name': u'/dev/vda',
+ 'image': [],
'ephemerals': [],
'block_device_mapping': [{'boot_index': None,
'mount_device': u'/dev/vdb',
@@ -591,8 +608,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Pick the first drive letter on the bus that is available
# as the config drive. Delete the last device hardcode as
@@ -647,8 +670,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {
@@ -697,9 +726,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -718,6 +752,9 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -754,6 +791,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
block_device_info = {
'swap': {'device_name': '/dev/vdb',
'swap_size': 10},
+ 'image': [{'device_type': 'disk',
+ 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
@@ -775,6 +814,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -803,6 +843,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = {}
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': None,
'mount_device': None,
@@ -858,6 +899,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -899,6 +941,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'root_device_name': '/dev/vdf',
'swap': {'device_name': '/dev/vdy',
'swap_size': 10},
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -940,6 +983,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'swap': {'device_name': '/dev/vdb',
'device_type': 'really_lame_type',
'swap_size': 10},
+ 'image': [{'device_name': '/dev/vda',
+ 'device_type': 'disk'}],
'ephemerals': [{'disk_bus': 'no_such_bus',
'device_type': 'yeah_right',
'device_name': '/dev/vdc', 'size': 10}],
@@ -951,6 +996,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
}
expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
'device_type': 'disk', 'swap_size': 10}
+ expected_image = {'device_name': '/dev/vda', 'device_type': 'disk',
+ 'disk_bus': 'virtio'}
expected_ephemeral = {'disk_bus': 'virtio',
'device_type': 'disk',
'device_name': '/dev/vdc', 'size': 10}
@@ -970,6 +1017,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.assertFalse(get_flavor_mock.called)
self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_image, block_device_info['image'][0])
self.assertEqual(expected_ephemeral,
block_device_info['ephemerals'][0])
self.assertEqual(expected_bdm,
@@ -1124,7 +1172,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_type': 'lame_type',
'delete_on_termination': True},
{'disk_bus': 'sata', 'guest_format': None,
- 'device_name': '/dev/sda', 'size': 3}]
+ 'device_name': '/dev/sda', 'size': 3},
+ {'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': '{"json": "options"}'}]
expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
{'dev': 'vdb', 'type': 'disk',
'bus': 'virtio', 'format': 'ext4'},
@@ -1133,7 +1184,11 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'bus': 'scsi', 'boot_index': '1'},
{'dev': 'vdo', 'type': 'disk',
'bus': 'scsi', 'boot_index': '2'},
- {'dev': 'sda', 'type': 'disk', 'bus': 'sata'}]
+ {'dev': 'sda', 'type': 'disk', 'bus': 'sata'},
+ {'dev': 'vda', 'type': 'disk', 'bus': 'virtio',
+ 'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': {'json': 'options'}}]
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
for bdm, expected in zip(bdms, expected):
@@ -1441,6 +1496,15 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'destination_type': 'volume',
'boot_index': -1}))]
+ self.image = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 6, 'instance_uuid': uuids.instance,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'boot_index': 0}))]
+
def tearDown(self):
super(DefaultDeviceNamesTestCase, self).tearDown()
for patcher in self.patchers:
@@ -1450,7 +1514,7 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'nova.virt.libvirt.utils.get_arch',
return_value=obj_fields.Architecture.X86_64)
def _test_default_device_names(self, eph, swap, bdm, mock_get_arch):
- bdms = eph + swap + bdm
+ bdms = self.image + eph + swap + bdm
bdi = driver.get_block_device_info(self.instance, bdms)
blockinfo.default_device_names(self.virt_type,
self.context,
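
The blockinfo updates above all stem from one change: block_device_info now carries an 'image' list that describes the root disk as an image-backed BDM, and get_disk_mapping consumes it instead of inferring the boot disk. The shape used throughout the tests; an empty list corresponds to the volume-backed cases above:

    block_device_info = {
        'root_device_name': '/dev/vda',
        'image': [{'device_type': 'disk', 'boot_index': 0}],
        'ephemerals': [],
        'block_device_mapping': [],
    }
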
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 2d690e5dfc..3d0b5ae685 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -16,6 +16,7 @@ from lxml import etree
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
+from nova import exception
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
@@ -70,6 +71,23 @@ class LibvirtConfigTest(LibvirtConfigBaseTest):
obj = config.LibvirtConfigObject(root_name="demo")
obj.parse_str(inxml)
+ def test_parse_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertTrue(obj.parse_on_off_str('on'))
+ self.assertFalse(obj.parse_on_off_str('off'))
+ self.assertFalse(obj.parse_on_off_str(None))
+ self.assertRaises(exception.InvalidInput, obj.parse_on_off_str, 'foo')
+
+ def test_get_yes_no_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('yes', obj.get_yes_no_str(True))
+ self.assertEqual('no', obj.get_yes_no_str(False))
+
+ def test_get_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('on', obj.get_on_off_str(True))
+ self.assertEqual('off', obj.get_on_off_str(False))
+
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
@@ -1519,7 +1537,7 @@ class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
- def test_config_graphics(self):
+ def test_config_graphics_vnc(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
@@ -1531,11 +1549,38 @@ class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
+ def test_config_graphics_spice(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "spice"
+ obj.autoport = False
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ obj.image_compression = "auto_glz"
+ obj.jpeg_compression = "auto"
+ obj.zlib_compression = "always"
+ obj.playback_compression = True
+ obj.streaming_mode = "filter"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="spice" autoport="no" keymap="en_US" listen="127.0.0.1">
+ <image compression="auto_glz"/>
+ <jpeg compression="auto"/>
+ <zlib compression="always"/>
+ <playback compression="on"/>
+ <streaming mode="filter"/>
+ </graphics>
+ """)
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
def test_config_pci_guest_host_dev(self):
- obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
+ obj = config.LibvirtConfigGuestHostdev()
+ obj.mode = 'subsystem'
+ obj.type = 'pci'
+
xml = obj.to_xml()
expected = """
<hostdev mode="subsystem" type="pci" managed="yes"/>
@@ -1570,7 +1615,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
</hostdev>
"""
- def test_config_guest_hosdev_pci(self):
+ def test_config_guest_hostdev_pci(self):
hostdev = config.LibvirtConfigGuestHostdevPCI()
hostdev.domain = "1234"
hostdev.bus = "11"
@@ -1579,7 +1624,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
xml = hostdev.to_xml()
self.assertXmlEqual(self.expected, xml)
- def test_parse_guest_hosdev_pci(self):
+ def test_parse_guest_hostdev_pci(self):
xmldoc = self.expected
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
@@ -1591,7 +1636,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
self.assertEqual(obj.slot, '0x22')
self.assertEqual(obj.function, '0x3')
- def test_parse_guest_hosdev_usb(self):
+ def test_parse_guest_hostdev_usb(self):
xmldoc = """<hostdev mode='subsystem' type='usb'>
<source startupPolicy='optional'>
<vendor id='0x1234'/>
@@ -2318,6 +2363,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
obj.vapic = True
obj.spinlocks = True
obj.vendorid_spoof = True
+ obj.vpindex = True
+ obj.runtime = True
+ obj.synic = True
+ obj.reset = True
+ obj.frequencies = True
+ obj.reenlightenment = True
+ obj.tlbflush = True
+ obj.ipi = True
+ obj.evmcs = True
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -2326,6 +2380,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
<vapic state="on"/>
<spinlocks state="on" retries="4095"/>
<vendor_id state="on" value="1234567890ab"/>
+ <vpindex state='on'/>
+ <runtime state='on'/>
+ <synic state='on'/>
+ <reset state='on'/>
+ <frequencies state='on'/>
+ <reenlightenment state='on'/>
+ <tlbflush state='on'/>
+ <ipi state='on'/>
+ <evmcs state='on'/>
</hyperv>""")
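Since every new enlightenment above serializes identically (a child element with state="on"), the mapping can be sketched generically. The flag list mirrors the test; the helper itself is an assumption, not nova's code.

from lxml import etree

HYPERV_FLAGS = ('vpindex', 'runtime', 'synic', 'reset', 'frequencies',
                'reenlightenment', 'tlbflush', 'ipi', 'evmcs')

def format_hyperv_flags(feature_obj, hyperv_elem):
    # Only enabled flags get an element; absent flags are omitted.
    for name in HYPERV_FLAGS:
        if getattr(feature_obj, name, False):
            etree.SubElement(hyperv_elem, name).set('state', 'on')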
def test_feature_pmu(self):
@@ -2344,6 +2407,13 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
xml = obj.to_xml()
self.assertXmlEqual(xml, "<pmu state='off'/>")
+ def test_feature_ioapic(self):
+ obj = config.LibvirtConfigGuestFeatureIOAPIC()
+ obj.driver = "libvirt"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<ioapic driver='libvirt'/>")
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
@@ -3135,6 +3205,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
config.LibvirtConfigNodeDeviceMdevInformation)
self.assertEqual("nvidia-11", obj.mdev_information.type)
self.assertEqual(12, obj.mdev_information.iommu_group)
+ self.assertIsNone(obj.mdev_information.uuid)
+
+ def test_config_mdev_device_uuid(self):
+ xmlin = """
+ <device>
+ <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name>
+ <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path>
+ <parent>pci_0000_41_00_0</parent>
+ <driver>
+ <name>vfio_mdev</name>
+ </driver>
+ <capability type='mdev'>
+ <type id='nvidia-442'/>
+ <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid>
+ <iommuGroup number='57'/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+ self.assertIsInstance(obj.mdev_information,
+ config.LibvirtConfigNodeDeviceMdevInformation)
+ self.assertEqual("nvidia-442", obj.mdev_information.type)
+ self.assertEqual(57, obj.mdev_information.iommu_group)
+ self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a",
+ obj.mdev_information.uuid)
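The parsing exercised here can be sketched with plain lxml: the <uuid> child is optional (older libvirt doesn't report it), so the earlier test asserts None and this one asserts the reported value. The helper below is a stand-alone illustration, not the actual parse_dom() implementation.

from lxml import etree

def parse_mdev_capability(xml):
    dev = etree.fromstring(xml)
    cap = dev.find("capability[@type='mdev']")
    return {
        'type': cap.find('type').get('id'),
        'iommu_group': int(cap.find('iommuGroup').get('number')),
        # find() returns None when <uuid> is absent, so default to None.
        'uuid': getattr(cap.find('uuid'), 'text', None),
    }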
def test_config_vdpa_device(self):
xmlin = """
@@ -3273,6 +3369,86 @@ class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
'name': 'GRID M60-0B',
'type': 'nvidia-11'}], obj.mdev_capability[0].mdev_types)
+ def test_config_device_pci_vpd(self):
+ xmlin = """
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>0</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>1</function>
+ <product id='0xa2d6'>MT42822 BlueField-2</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='virt_functions' maxCount='16'/>
+ <capability type='vpd'>
+ <name>BlueField-2 DPU 25GbE</name>
+ <fields access='readonly'>
+ <change_level>B1</change_level>
+ <manufacture_id>foobar</manufacture_id>
+ <part_number>MBF2H332A-AEEOT</part_number>
+ <serial_number>MT2113X00000</serial_number>
+ <vendor_field index='0'>PCIeGen4 x8</vendor_field>
+ <vendor_field index='2'>MBF2H332A-AEEOT</vendor_field>
+ <vendor_field index='3'>3c53d07eec484d8aab34dabd24fe575aa</vendor_field>
+ <vendor_field index='A'>MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A</vendor_field>
+ </fields>
+ <fields access='readwrite'>
+ <asset_tag>fooasset</asset_tag>
+ <vendor_field index='0'>vendorfield0</vendor_field>
+ <vendor_field index='2'>vendorfield2</vendor_field>
+ <vendor_field index='A'>vendorfieldA</vendor_field>
+ <system_field index='B'>systemfieldB</system_field>
+ <system_field index='0'>systemfield0</system_field>
+ </fields>
+ </capability>
+ <iommuGroup number='66'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x1'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' speed='8' width='8'/>
+ </pci-express>
+ </capability>""" # noqa: E501
+ obj = config.LibvirtConfigNodeDevicePciCap()
+ obj.parse_str(xmlin)
+
+ # Asserting common PCI attribute parsing.
+ self.assertEqual(0, obj.domain)
+ self.assertEqual(130, obj.bus)
+ self.assertEqual(0, obj.slot)
+ self.assertEqual(1, obj.function)
+ # Asserting vpd capability parsing.
+ self.assertEqual("MT42822 BlueField-2", obj.product)
+ self.assertEqual(0xA2D6, obj.product_id)
+ self.assertEqual("Mellanox Technologies", obj.vendor)
+ self.assertEqual(0x15B3, obj.vendor_id)
+ self.assertEqual(obj.numa_node, 1)
+ self.assertIsInstance(obj.vpd_capability,
+ config.LibvirtConfigNodeDeviceVpdCap)
+ self.assertEqual(obj.vpd_capability.card_name, 'BlueField-2 DPU 25GbE')
+
+ self.assertEqual(obj.vpd_capability.change_level, 'B1')
+ self.assertEqual(obj.vpd_capability.manufacture_id, 'foobar')
+ self.assertEqual(obj.vpd_capability.part_number, 'MBF2H332A-AEEOT')
+ self.assertEqual(obj.vpd_capability.card_serial_number, 'MT2113X00000')
+ self.assertEqual(obj.vpd_capability.asset_tag, 'fooasset')
+ self.assertEqual(obj.vpd_capability.ro_vendor_fields, {
+ '0': 'PCIeGen4 x8',
+ '2': 'MBF2H332A-AEEOT',
+ '3': '3c53d07eec484d8aab34dabd24fe575aa',
+ 'A': 'MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A',
+ })
+ self.assertEqual(obj.vpd_capability.rw_vendor_fields, {
+ '0': 'vendorfield0',
+ '2': 'vendorfield2',
+ 'A': 'vendorfieldA',
+ })
+ self.assertEqual(obj.vpd_capability.rw_system_fields, {
+ '0': 'systemfield0',
+ 'B': 'systemfieldB',
+ })
+
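The dict-shaped assertions above suggest the parser groups vendor_field and system_field elements by their access mode and index. A self-contained sketch of that grouping follows; it assumes bare lxml and illustrative names rather than the real LibvirtConfigNodeDeviceVpdCap parser.

from lxml import etree

def parse_vpd_fields(pci_cap_xml):
    cap = etree.fromstring(pci_cap_xml)
    ro_vendor, rw_vendor, rw_system = {}, {}, {}
    for fields in cap.findall("capability[@type='vpd']/fields"):
        access = fields.get('access')
        for field in fields.findall('vendor_field'):
            target = ro_vendor if access == 'readonly' else rw_vendor
            target[field.get('index')] = field.text
        if access == 'readwrite':
            # system_field only appears in the read-write section.
            for field in fields.findall('system_field'):
                rw_system[field.get('index')] = field.text
    return ro_vendor, rw_vendor, rw_system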
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
@@ -3869,8 +4045,10 @@ class LibvirtConfigSecretTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestVPMEMTest(LibvirtConfigBaseTest):
def test_config_vpmem(self):
- obj = config.LibvirtConfigGuestVPMEM(
- devpath='/dev/dax0.0', size_kb=4096 * units.Ki, align_kb=2048)
+ obj = config.LibvirtConfigGuestVPMEM()
+ obj.source_path = '/dev/dax0.0'
+ obj.target_size = 4096 * units.Ki
+ obj.align_size = 2048
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -3890,6 +4068,28 @@ class LibvirtConfigGuestVPMEMTest(LibvirtConfigBaseTest):
</memory>""")
+class LibvirtConfigGuestIOMMUTest(LibvirtConfigBaseTest):
+
+ def test_config_iommu(self):
+ obj = config.LibvirtConfigGuestIOMMU()
+ obj.model = "intel"
+ obj.interrupt_remapping = True
+ obj.caching_mode = True
+ obj.aw_bits = 48
+ obj.eim = True
+ obj.iotlb = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(
+ xml,
+ """
+<iommu model='intel'>
+ <driver intremap='on' caching_mode='on' aw_bits='48' eim='on' iotlb='on'/>
+</iommu>
+ """,
+ )
+
+
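The expected XML pins down how the vIOMMU options serialize: the model is an attribute of <iommu> and everything else lands on a nested <driver> element, with booleans rendered as on/off. A minimal sketch, assuming bare lxml and an illustrative function name:

from lxml import etree

def format_iommu(model, interrupt_remapping, caching_mode, aw_bits,
                 eim, iotlb):
    def _on_off(value):
        return 'on' if value else 'off'

    iommu = etree.Element('iommu', model=model)
    driver = etree.SubElement(iommu, 'driver')
    driver.set('intremap', _on_off(interrupt_remapping))
    driver.set('caching_mode', _on_off(caching_mode))
    driver.set('aw_bits', str(aw_bits))
    driver.set('eim', _on_off(eim))
    driver.set('iotlb', _on_off(iotlb))
    return etree.tostring(iommu).decode()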
class LibvirtConfigDomainCapsVideoModelsTests(LibvirtConfigBaseTest):
def test_parse_video_model(self):
@@ -4006,7 +4206,8 @@ class LibvirtConfigDomainCapsDevicesTests(LibvirtConfigBaseTest):
obj.parse_str(xml)
# we only use the video and disk devices today.
device_types = [config.LibvirtConfigDomainCapsDiskBuses,
- config.LibvirtConfigDomainCapsVideoModels]
+ config.LibvirtConfigDomainCapsVideoModels,
+ ]
# so we assert there are only two device types parsed
self.assertEqual(2, len(obj.devices))
# we then assert that the parsed devices are of the correct type
diff --git a/nova/tests/unit/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py
index a6ad7f9ccc..cb435286e9 100644
--- a/nova/tests/unit/virt/libvirt/test_designer.py
+++ b/nova/tests/unit/virt/libvirt/test_designer.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.pci import utils as pci_utils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 50cb5536ef..66dbf795d8 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -31,6 +31,7 @@ import testtools
import threading
import time
import unittest
+from unittest import mock
from castellan import key_manager
import ddt
@@ -38,7 +39,6 @@ import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
-import mock
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
@@ -76,7 +76,6 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
@@ -740,16 +739,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'resolve_driver_format',
imagebackend.Image._get_driver_format)
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
+ self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
# architecture in some tests
- _p = mock.patch('os.uname')
- self.mock_uname = _p.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.X86_64)
- self.addCleanup(_p.stop)
self.test_instance = _create_test_instance()
network_info = objects.InstanceInfoCache(
@@ -820,6 +818,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Driver capabilities for 'supports_socket_pci_numa_affinity' "
"is invalid",
)
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption'],
+ "Driver capabilities for 'supports_ephemeral_encryption' "
+ "is invalid",
+ )
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption_luks'],
+ "Driver capabilities for 'supports_ephemeral_encryption_luks' "
+ " is invalid",
+ )
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
@@ -870,9 +878,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"'swtpm_enabled=True'"
)
- @mock.patch.object(
- libvirt_driver.LibvirtDriver, '_register_instance_machine_type',
- new=mock.Mock())
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(
host.Host, 'supports_secure_boot', new_callable=mock.PropertyMock)
def test_driver_capabilities_secure_boot(self, mock_supports):
@@ -885,6 +893,23 @@ class LibvirtConnTestCase(test.NoDBTestCase,
)
mock_supports.assert_called_once_with()
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
+ @mock.patch.object(
+ host.Host, 'supports_remote_managed_ports',
+ new_callable=mock.PropertyMock)
+ def test_driver_capabilities_remote_managed_ports(self, mock_supports):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.init_host("dummyhost")
+ self.assertTrue(
+ drvr.capabilities['supports_remote_managed_ports'],
+ "Driver capabilities for 'supports_remote_managed_ports' "
+ "is invalid when host should support this feature"
+ )
+ mock_supports.assert_called_once_with()
+
def test_driver_raises_on_non_linux_platform(self):
with utils.temporary_mutation(sys, platform='darwin'):
self.assertRaises(
@@ -946,9 +971,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits')
+ @mock.patch.object(host.Host, "has_min_version")
def test_static_traits(
- self, mock_vif_traits, mock_video_traits, mock_storage_traits,
- mock_cpu_traits,
+ self, mock_version, mock_vif_traits, mock_video_traits,
+ mock_storage_traits, mock_cpu_traits,
):
"""Ensure driver capabilities are correctly retrieved and cached."""
@@ -959,14 +985,21 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_video_traits.return_value = {'COMPUTE_GRAPHICS_MODEL_VGA': True}
mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
+ # needed so COMPUTE_VIOMMU_MODEL_VIRTIO is reported as supported
+ mock_version.return_value = True
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
expected = {
- 'HW_CPU_HYPERTHREADING': True,
- 'COMPUTE_STORAGE_BUS_VIRTIO': True,
'COMPUTE_GRAPHICS_MODEL_VGA': True,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_STORAGE_BUS_VIRTIO': True,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': True,
+ 'HW_CPU_HYPERTHREADING': True
}
static_traits = drvr.static_traits
@@ -1012,6 +1045,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': False
}
static_traits = drvr.static_traits
@@ -1025,7 +1062,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
any_order=True)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(host.Host, "has_min_version")
def test_min_version_start_ok(self, mock_version):
mock_version.return_value = True
@@ -1041,7 +1079,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
@@ -1071,7 +1110,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
@@ -1101,7 +1141,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
@@ -1131,7 +1172,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION))
@@ -1161,7 +1203,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_min_version_ppc_ok(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.PPC64)
@@ -1169,7 +1212,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.init_host("dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_min_version_s390_ok(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.S390X)
@@ -1177,7 +1221,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.init_host("dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_file_backed_memory_support_called(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr,
@@ -1232,7 +1277,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__prepare_cpu_flag(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -1262,7 +1308,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertXmlEqual(expected_xml, cpu.to_xml())
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_start_ok(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1284,7 +1331,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_model(self, mocked_compare):
mocked_compare.side_effect = (2, 0)
self.flags(cpu_mode="custom",
@@ -1295,7 +1343,24 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.init_host, "dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ def test__check_cpu_compatibility_skip_compare_at_init(
+ self, mocked_compare
+ ):
+ self.flags(group='workarounds', skip_cpu_compare_at_startup=True)
+ self.flags(cpu_mode="custom",
+ cpu_models=["Icelake-Server-noTSX"],
+ cpu_model_extra_flags = ["-mpx"],
+ group="libvirt")
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+ mocked_compare.assert_not_called()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_with_flag(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1304,9 +1369,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["qemu64"],
cpu_model_extra_flags = ["avx", "avx2"],
@@ -1315,11 +1381,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
# here, and in the surrounding similar tests, the non-zero error
# code in the compareCPU() side effect indicates error
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["Broadwell-noTSX"],
cpu_model_extra_flags = ["a v x"],
@@ -1328,11 +1395,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_enabled_and_disabled_flags(
self, mocked_compare
):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(
cpu_mode="custom",
cpu_models=["Cascadelake-Server"],
@@ -1354,7 +1422,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_aarch64_qemu_custom_start_OK(self):
"""Test getting CPU traits when using a virt_type that doesn't support
the feature, only kvm and qemu supports reporting CPU traits.
@@ -1372,6 +1441,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_vtpm_support_non_qemu(self):
"""Test checking for vTPM support when we're not using QEMU or KVM."""
self.flags(swtpm_enabled=True, virt_type='lxc', group='libvirt')
@@ -1459,7 +1531,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_getgrnam.assert_called_with('admins')
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch('shutil.which')
@mock.patch('pwd.getpwnam')
@mock.patch('grp.getgrnam')
@@ -1780,6 +1853,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_guest.set_user_password.assert_called_once_with("root", "123")
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ def test_qemu_announce_self(self, mock_get_guest):
+ # Enable the workaround; announce_self should then be called 3 times
+ self.flags(enable_qemu_monitor_announce_self=True, group='workarounds')
+
+ mock_guest = mock.Mock(spec=libvirt_guest.Guest)
+ mock_get_guest.return_value = mock_guest
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._qemu_monitor_announce_self(mock_guest)
+
+ # Ensure that 3 calls are made, matching the default value (3) of
+ # the enable_qemu_monitor_announce_self_retries option
+ mock_guest.announce_self.assert_any_call()
+ self.assertEqual(3, mock_guest.announce_self.call_count)
+
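The workaround under test reduces to a bounded retry loop; a sketch is below. The retry-count option named in the comment is taken on trust from the test, and the helper signature is illustrative.

def qemu_monitor_announce_self(guest, retries=3):
    # Ask QEMU to re-announce the guest's MAC addresses (RARP/GARP) so
    # the network fabric relearns its location, e.g. after live migration.
    for _ in range(retries):
        guest.announce_self()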
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@@ -2227,6 +2316,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
+ pci_utils.get_mac_by_pci_address.side_effect = None
+ pci_utils.get_mac_by_pci_address.return_value = 'da:d1:f2:91:95:c1'
with test.nested(
mock.patch('nova.objects.VirtualInterfaceList'
'.get_by_instance_uuid', return_value=vifs),
@@ -2236,8 +2327,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=guest),
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
return_value=xml),
- mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- return_value='da:d1:f2:91:95:c1')):
+ ):
metadata_obj = drvr._build_device_metadata(self.context,
instance_ref)
metadata = metadata_obj.devices
@@ -2434,7 +2524,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(storage_ip, result['ip'])
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_lifecycle_event_registration(self):
calls = []
@@ -2533,6 +2624,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(time, "time")
def test_get_guest_config(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -2541,177 +2637,251 @@ class LibvirtConnTestCase(test.NoDBTestCase,
test_instance["display_name"] = "purple tomatoes"
test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
test_instance['system_metadata']['owner_user_name'] = 'cupcake'
-
- ctxt = context.RequestContext(project_id=123,
- project_name="aubergine",
- user_id=456,
- user_name="pie")
-
- flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
- vcpus=28,
- root_gb=496,
- ephemeral_gb=8128,
- swap=33550336,
- extra_specs={})
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
instance_ref = objects.Instance(**test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
-
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info,
- context=ctxt)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
self.assertEqual(cfg.uuid, instance_ref["uuid"])
- self.assertEqual(3, len(cfg.features))
- self.assertIsInstance(cfg.features[0],
- vconfig.LibvirtConfigGuestFeatureACPI)
- self.assertIsInstance(cfg.features[1],
- vconfig.LibvirtConfigGuestFeatureAPIC)
- self.assertIsInstance(
- cfg.features[2], vconfig.LibvirtConfigGuestFeatureVMCoreInfo)
self.assertEqual(cfg.memory, 6 * units.Ki)
self.assertEqual(cfg.vcpus, 28)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
self.assertEqual(len(cfg.devices), 11)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigGuestUSBHostController)
- self.assertIsInstance(cfg.devices[10],
- vconfig.LibvirtConfigMemoryBalloon)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
+
self.assertEqual(len(cfg.metadata), 1)
- self.assertIsInstance(cfg.metadata[0],
- vconfig.LibvirtConfigGuestMetaNovaInstance)
- self.assertEqual(version.version_string_with_package(),
- cfg.metadata[0].package)
- self.assertEqual("purple tomatoes",
- cfg.metadata[0].name)
- self.assertEqual(1234567.89,
- cfg.metadata[0].creationTime)
- self.assertEqual("image",
- cfg.metadata[0].roottype)
- self.assertEqual(str(instance_ref["image_ref"]),
- cfg.metadata[0].rootid)
-
- self.assertIsInstance(cfg.metadata[0].owner,
- vconfig.LibvirtConfigGuestMetaNovaOwner)
- self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
- cfg.metadata[0].owner.userid)
- self.assertEqual("cupcake",
- cfg.metadata[0].owner.username)
- self.assertEqual("fake",
- cfg.metadata[0].owner.projectid)
- self.assertEqual("sweetshop",
- cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(
+ version.version_string_with_package(), cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes", cfg.metadata[0].name)
+ self.assertEqual(1234567.89, cfg.metadata[0].creationTime)
+ self.assertEqual("image", cfg.metadata[0].roottype)
+ self.assertEqual(
+ str(instance_ref["image_ref"]), cfg.metadata[0].rootid)
- self.assertIsInstance(cfg.metadata[0].flavor,
- vconfig.LibvirtConfigGuestMetaNovaFlavor)
- self.assertEqual("m1.small",
- cfg.metadata[0].flavor.name)
- self.assertEqual(6,
- cfg.metadata[0].flavor.memory)
- self.assertEqual(28,
- cfg.metadata[0].flavor.vcpus)
- self.assertEqual(496,
- cfg.metadata[0].flavor.disk)
- self.assertEqual(8128,
- cfg.metadata[0].flavor.ephemeral)
- self.assertEqual(33550336,
- cfg.metadata[0].flavor.swap)
+ self.assertIsInstance(
+ cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(
+ "838a72b0-0d54-4827-8fd6-fb1227633ceb",
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("cupcake", cfg.metadata[0].owner.username)
+ self.assertEqual("fake", cfg.metadata[0].owner.projectid)
+ self.assertEqual("sweetshop", cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small", cfg.metadata[0].flavor.name)
+ self.assertEqual(6, cfg.metadata[0].flavor.memory)
+ self.assertEqual(28, cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496, cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336, cfg.metadata[0].flavor.swap)
- def test_get_guest_config_q35(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ num_ports = 0
+ for device in cfg.devices:
+ try:
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
+ num_ports += 1
+ except AttributeError:
+ pass
- TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ # i440fx is not a PCIe machine type, so there should be no PCIe ports
+ self.assertEqual(0, num_ports)
+
+ @mock.patch.object(time, "time")
+ def test_get_guest_config_no_pcie_ports(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
+ time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- image_meta = objects.ImageMeta.from_dict({
- "disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-q35-test"}})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+ test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
+ test_instance['system_metadata']['owner_user_name'] = 'cupcake'
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.flavor = flavor
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
+ # i440fx is not a PCIe machine type, so there should be no PCIe ports
+ self.assertEqual(0, num_ports)
- def test_get_guest_config_pcie_i440fx(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_q35(self):
+ """Generate a "q35" guest with minimal configuration.
+
+ This configures an explicit machine type (q35) but defaults to x86
+ since this is our default architecture (in our test env, anyway).
+ """
+ self.flags(virt_type="kvm", group='libvirt')
TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ CONF.set_override(
+ "num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
+ group='libvirt',
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-i440fx-test"}})
+ "properties": {"hw_machine_type": "q35"},
+ })
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta,
+ disk_info,
+ )
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
+ self.assertEqual(len(cfg.devices), 19)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestPCIeRootController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- # i440fx is not pcie machine so there should be no pcie ports
- self.assertEqual(0, num_ports)
+ self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_default_machine_type',
new=mock.Mock(return_value='config-machine_type'))
def test_get_guest_config_records_machine_type_in_instance(self):
@@ -2897,7 +3067,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# NOTE(artom) This is a
# (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
- # tuple. See _get_guest_numa_config() docstring for full documenation.
+ # tuple. See _get_guest_numa_config() docstring for full documentation.
# _get_live_migrate_numa_info() only cares about guest_cpu_tune for CPU
# pinning and emulator thread pinning, and guest_numa_tune for cell
# pinning; so only include those 2 in the tuple.
@@ -2923,9 +3093,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'fake-instance-numa-topology',
'fake-flavor', 'fake-image-meta').obj_to_primitive())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fits(self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_fits(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -2957,14 +3126,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
@@ -2994,7 +3162,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertIsNone(cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
@@ -3093,6 +3261,41 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
+ def test_get_guest_memory_backing_config_locked_flavor(self):
+ extra_specs = {
+ "hw:locked_memory": "True",
+ "hw:mem_page_size": 1000,
+ }
+ flavor = objects.Flavor(
+ name='m1.small', memory_mb=6, vcpus=28, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
+ def test_get_guest_memory_backing_config_locked_image_meta(self):
+ extra_specs = {}
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {
+ "hw_locked_memory": "True",
+ "hw_mem_page_size": 1000,
+ }})
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
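These two tests pin down that locked memory can be requested from either side: the hw:locked_memory flavor extra spec or the hw_locked_memory image property. A sketch of that decision, with an illustrative helper name:

from oslo_utils import strutils

def wants_locked_memory(flavor_extra_specs, image_props):
    # Either source may request locking; both default to False when unset.
    return (
        strutils.bool_from_string(flavor_extra_specs.get('hw:locked_memory'))
        or strutils.bool_from_string(image_props.get('hw_locked_memory')))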
def test_get_guest_memory_backing_config_realtime_invalid_share(self):
"""Test behavior when there is no pool of shared CPUS on which to place
the emulator threads, isolating them from the instance CPU processes.
@@ -3196,7 +3399,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(
"Memory encryption requested by hw:mem_encryption extra spec in "
"m1.fake flavor but image fake_image doesn't have "
- "'hw_firmware_type' property set to 'uefi'", str(exc))
+ "'hw_firmware_type' property set to 'uefi' or volume-backed "
+ "instance was requested", str(exc))
def test_sev_enabled_host_extra_spec_no_machine_type(self):
exc = self.assertRaises(exception.InvalidMachineType,
@@ -3355,10 +3559,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self._test_get_guest_memory_backing_config,
host_topology, inst_topology, numa_tune)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_pci_no_numa_info(
- self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
@@ -3385,10 +3587,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(objects=[pci_device])
+ pci_req = objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name='pci-alias-1')
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[pci_req])
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
@@ -3396,20 +3603,18 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
- return_value=set([3])),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device])):
+ return_value=set([3]))
+ ):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3435,28 +3640,36 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(
+ objects=[pci_device, pci_device2]
+ )
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias-1"
+ )
+ ]
+ )
with test.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([3])),
mock.patch.object(random, 'choice'),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device, pci_device2]),
mock.patch.object(conn, '_has_numa_support',
return_value=False)
- ) as (_, _, choice_mock, pci_mock, _):
+ ) as (_, _, choice_mock, _):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@@ -3517,10 +3730,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
exception.NUMATopologyUnsupported,
None)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
- self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
group='compute')
@@ -3555,12 +3766,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# NOTE(ndipanov): we make sure that pin_set was taken into account
# when choosing viable cells
self.assertEqual(set([2, 3]), cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_non_numa_host_instance_topo(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
@@ -3597,7 +3807,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.numatune)
self.assertIsNotNone(cfg.cpu.numa)
for instance_cell, numa_cfg_cell in zip(
@@ -3607,9 +3817,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_topo(self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_topo(self):
self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
group='compute')
@@ -3690,6 +3899,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_topo_reordered(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
@@ -3765,6 +3975,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
@@ -3843,6 +4054,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_cpu_mixed(self):
"""Test to create mixed instance libvirt configuration which has a
default emulator thread policy and verify the NUMA topology related
@@ -3960,7 +4172,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
- def test_get_guest_config_numa_host_instance_cpu_mixed_isolated_emu(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_cpu_mixed_isolated_emu(
+ self):
"""Test to create mixed instance libvirt configuration which has an
ISOLATED emulator thread policy and verify the NUMA topology related
settings.
@@ -4047,6 +4261,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_cpu_mixed_realtime(self):
"""Test of creating mixed instance libvirt configuration. which is
created through 'hw:cpu_realtime_mask' and 'hw:cpu_realtime' extra
@@ -4172,6 +4387,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
self.assertEqual(set([0, 1, 4, 5]), cfg.cputune.vcpusched[0].vcpus)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_mempages_shared(self):
self.flags(cpu_shared_set='2-5', cpu_dedicated_set=None,
group='compute')
@@ -4245,7 +4461,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(0, len(cfg.cputune.vcpusched))
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
- def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(
+ self):
self.flags(cpu_shared_set=None, cpu_dedicated_set='4-7',
group='compute')
@@ -4340,6 +4558,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# which are 6, 7
self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set='4-8',
group='compute')
@@ -4446,8 +4665,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.Invalid, drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- def test_get_guest_config_numa_host_instance_shared_emulator_threads(
- self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_shared_emulator_threads(self):
self.flags(cpu_shared_set='0,1', cpu_dedicated_set='2-7',
group='compute')
instance_topology = objects.InstanceNUMATopology(
@@ -4653,6 +4872,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(drvr._wants_hugepages(host_topology,
instance_topology))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -4696,6 +4916,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
else:
self.assertEqual(2, len(cfg.clock.timers))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock_hpet_false(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -4741,6 +4962,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
else:
self.assertEqual(2, len(cfg.clock.timers))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock_hpet_true(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -4787,6 +5009,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
else:
self.assertEqual(2, len(cfg.clock.timers))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock_hpet_invalid(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5066,6 +5289,44 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual('/usr/share/OVMF/OVMF_CODE.fd', cfg.os_loader)
self.assertEqual('/usr/share/OVMF/OVMF_VARS.fd', cfg.os_nvram_template)
+ def test_get_guest_config_with_secure_boot_and_smm_required(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ # uefi only used with secure boot
+ drvr._host._supports_uefi = True
+ # smm only used with secure boot
+ drvr._host._supports_secure_boot = True
+
+ # NOTE(imranh2): The current way of gathering firmware is inflexible:
+ # nova/tests/fixtures/libvirt.py FakeLoaders has requires-smm defined,
+ # so query it programmatically here. In the future we should test
+ # firmware that both does and doesn't require SMM, but the current way
+ # firmware is selected doesn't make that possible.
+ loader, nvram_template, requires_smm = drvr._host.get_loader(
+ 'x86_64', 'q35', True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ 'disk_format': 'raw',
+ # secure boot requires UEFI
+ 'properties': {
+ 'hw_firmware_type': 'uefi',
+ 'hw_machine_type': 'q35',
+ 'os_secure_boot': 'required',
+ },
+ })
+ instance_ref = objects.Instance(**self.test_instance)
+
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
+
+ cfg = drvr._get_guest_config(
+ instance_ref, [], image_meta, disk_info)
+ # if we require it make sure it's there
+ if requires_smm:
+ self.assertTrue(any(isinstance(feature,
+ vconfig.LibvirtConfigGuestFeatureSMM)
+ for feature in cfg.features))
+
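The rule the test encodes can be sketched in a few lines: if the selected firmware declares requires-smm, the guest feature list must gain an SMM entry. The helper below is illustrative and reuses the vconfig alias this test module already imports.

def add_smm_if_required(features, requires_smm):
    if requires_smm and not any(
            isinstance(f, vconfig.LibvirtConfigGuestFeatureSMM)
            for f in features):
        features.append(vconfig.LibvirtConfigGuestFeatureSMM())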
@ddt.data(True, False)
def test_get_guest_config_with_secure_boot_required(
self, host_has_support,
@@ -5146,7 +5407,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertTrue(drvr._check_uefi_support(None))
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ self.assertTrue(drvr._check_uefi_support(image_meta))
def test_get_guest_config_with_block_device(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5241,6 +5503,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[2].target_dev, 'vdd')
mock_save.assert_called_with()
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_with_configdrive(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support to ide, and so libvirt translate
@@ -5559,6 +5822,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'vnc')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
@@ -5589,6 +5857,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, 'vnc')
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
def test_get_guest_config_with_spice_and_tablet(self):
@@ -5625,8 +5898,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'spice')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_with_spice_and_agent(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
@@ -5683,8 +5962,57 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[3].type, 'spicevmc')
self.assertEqual(cfg.devices[4].type, "spice")
+ self.assertIsNone(cfg.devices[4].image_compression)
+ self.assertIsNone(cfg.devices[4].jpeg_compression)
+ self.assertIsNone(cfg.devices[4].zlib_compression)
+ self.assertIsNone(cfg.devices[4].playback_compression)
+ self.assertIsNone(cfg.devices[4].streaming_mode)
self.assertEqual(cfg.devices[5].type, video_type)
+ def test_get_guest_config_with_spice_compression(self):
+ self.flags(enabled=False, group='vnc')
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ image_compression='auto_lz',
+ jpeg_compression='never',
+ zlib_compression='always',
+ playback_compression=False,
+ streaming_mode='all',
+ server_listen='10.0.0.1',
+ group='spice')
+ self.flags(pointer_model='usbtablet')
+
+ cfg = self._get_guest_config_with_graphics()
+
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestUSBHostController)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, 'spice')
+ self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
+ self.assertEqual(cfg.devices[3].image_compression, 'auto_lz')
+ self.assertEqual(cfg.devices[3].jpeg_compression, 'never')
+ self.assertEqual(cfg.devices[3].zlib_compression, 'always')
+ self.assertFalse(cfg.devices[3].playback_compression)
+ self.assertEqual(cfg.devices[3].streaming_mode, 'all')
+
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@@ -5697,6 +6025,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
expected = {
fields.Architecture.X86_64: vconfig.LibvirtConfigGuestSerial,
@@ -5709,7 +6038,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = vconfig.LibvirtConfigGuest()
drvr._create_consoles(
- guest_cfg=guest, instance=instance, flavor={}, image_meta={})
+ guest_cfg=guest,
+ instance=instance,
+ flavor={},
+ image_meta=image_meta)
self.assertEqual(1, len(guest.devices))
console_device = guest.devices[0]
self.assertIsInstance(console_device, device_type)
@@ -5921,9 +6253,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(enabled=serial_enabled, group='serial_console')
guest_cfg = vconfig.LibvirtConfigGuest()
instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr._create_consoles(
- guest_cfg, instance=instance, flavor=None, image_meta=None)
+ guest_cfg,
+ instance=instance,
+ flavor=None,
+ image_meta=image_meta)
self.assertEqual(1, len(guest_cfg.devices))
device = guest_cfg.devices[0]
@@ -6094,6 +6430,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
i = drvr._get_scsi_controller_next_unit(guest)
self.assertEqual(expect_num, i)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_with_type_kvm_on_s390(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
@@ -6893,14 +7230,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[5].rate_bytes, 1024)
self.assertEqual(cfg.devices[5].rate_period, 2)
- @mock.patch('nova.virt.libvirt.driver.os.path.exists')
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_with_rng_backend(self, mock_path):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_with_rng_backend(self):
self.flags(virt_type='kvm',
rng_dev_path='/dev/hw_rng',
group='libvirt')
self.flags(pointer_model='ps2mouse')
- mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -6957,29 +7292,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
[],
image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
- self.flags(virt_type='kvm', group='libvirt')
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.flavor.vcpus = 4
- image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
-
- cfg = drvr._get_guest_config(instance_ref, [],
- image_meta, disk_info)
-
- self.assertEqual(4096, cfg.cputune.shares)
-
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_cpu_quota(self, is_able):
+ def test_get_guest_config_with_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7315,9 +7628,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(images_type='rbd', group='libvirt')
self._test_get_guest_config_disk_cachemodes('rbd')
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
+ def test_get_guest_config_with_bogus_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7335,9 +7646,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=False)
- def test_get_update_guest_cputune(self, is_able):
+ def test_get_update_guest_cputune(self):
+ # No CPU controller on the host
+ self.cgroups.version = 0
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -7389,12 +7701,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
address='0000:00:00.1',
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
+ instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias"
+ )
+ ]
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@@ -7510,11 +7829,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_aarch64(
- self, mock_path_exists, mock_numa, mock_storage,
- ):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_aarch64(self, mock_numa, mock_storage):
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
@@ -7534,7 +7850,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, disk_info)
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
num_ports = 0
@@ -7551,10 +7866,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
def test_get_guest_config_aarch64_with_graphics(
- self, mock_path_exists, mock_numa, mock_storage,
+ self, mock_numa, mock_storage,
):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
@@ -7564,7 +7878,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = self._get_guest_config_with_graphics()
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
usbhost_exists = False
@@ -7593,16 +7906,20 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return drvr._get_guest_config(
instance_ref, _fake_network_info(self), image_meta, disk_info)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_machine_type_through_image_meta(self):
cfg = self._get_guest_config_machine_type_through_image_meta(
"fake_machine_type")
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
- def test_get_guest_config_machine_type_through_image_meta_sev(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_machine_type_through_image_meta_sev(self):
fake_q35 = "fake-q35-2.11"
cfg = self._get_guest_config_machine_type_through_image_meta(fake_q35)
self.assertEqual(cfg.os_mach_type, fake_q35)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_machine_type_from_config(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(hw_machine_type=['x86_64=fake_machine_type'],
@@ -7731,11 +8048,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg.devices[device_index], vconfig.LibvirtConfigGuestVideo)
self.assertEqual(cfg.devices[device_index].type, 'vga')
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
self.flags(enabled=True, group='vnc')
self._test_get_guest_config_ppc64(4)
- def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
self.flags(enabled=True,
agent_enabled=True,
group='spice')
@@ -7823,6 +8143,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta, disk_info)
self.assertIsNone(conf.cpu)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_cpu_config_automatic(self):
expected = {
fields.Architecture.X86_64: "host-model",
@@ -7903,6 +8224,33 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, 'custom')
+ def test_get_x86_64_hw_emulated_architecture_aarch64(self):
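+ # hw_emulation_architecture should take precedence over
+ # hw_architecture when choosing the emulated architecture.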
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ 'properties': {
+ 'hw_architecture': 'x86_64',
+ 'hw_emulation_architecture': 'aarch64',
+ 'hw_machine_type': 'virt',
+ 'hw_firmware_type': 'uefi',
+ }})
+
+ self.assertEqual(drvr._check_emulation_arch(image_meta),
+ 'aarch64')
+
+ def test_get_x86_64_hw_emulated_architecture_ppc64(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ 'properties': {
+ 'hw_architecture': 'x86_64',
+ 'hw_emulation_architecture': 'ppc64le',
+ 'hw_machine_type': 'pseries',
+ }})
+
+ self.assertEqual(drvr._check_emulation_arch(image_meta),
+ 'ppc64le')
+
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_custom_with_extra_flags(self,
mock_warn):
@@ -8284,6 +8632,206 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
+ def test_get_guest_iommu_not_enabled(self):
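+ # Without a hw:viommu_model extra spec or image property, the
+ # generated config should contain no iommu device.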
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ test_instance = _create_test_instance()
+ instance_ref = objects.Instance(**test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ for device in cfg.devices:
+ self.assertNotEqual('iommu', device.root_name)
+
+ def test_get_guest_iommu_config_model(self):
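+ # An explicitly requested model ('intel') should be used as-is.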
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'intel',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ def test_get_guest_iommu_config_model_auto(self, has_min_version):
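+ # With hw:viommu_model=auto, a new enough host (has_min_version
+ # returns True here) should get the virtio model.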
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(48, device.aw_bits)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_intel(self, has_min_version):
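+ # With hw:viommu_model=auto, an older x86 host (has_min_version
+ # returns False here) should fall back to the intel model.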
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_aarch64(self, has_min_version):
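+ # With hw:viommu_model=auto on AArch64, the model should resolve
+ # to smmuv3.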
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_viommu_model": 'auto',
+ "hw_architecture": fields.Architecture.AARCH64,
+ "hw_machine_type": "virt"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('smmuv3', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertFalse(device.eim)
+ self.assertTrue(device.iotlb)
+ self.assertEqual(1, count)
+
+ def test_get_guest_iommu_config_not_support_machine_type(self):
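+ # A machine type other than q35/virt ('pc-i440fx-2.11' here)
+ # should raise InvalidVIOMMUMachineType.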
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUMachineType, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
+ def test_get_guest_iommu_config_not_support_architecture(self):
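+ # An architecture other than x86_64/aarch64 (ppc64le here) should
+ # raise InvalidVIOMMUArchitecture.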
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_architecture": fields.Architecture.PPC64LE,
+ "hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUArchitecture, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -8373,6 +8921,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
break
self.assertTrue(no_exist)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_usb_controller(self):
self.flags(enabled=True, group='vnc')
@@ -8525,6 +9074,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None,
(("disk", "virtio", "vda"),))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_xml_disk_bus_ide(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support for ide, and so libvirt translates
@@ -8560,6 +9110,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info,
(expected,))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_xml_disk_bus_ide_and_virtio(self):
expected = {
fields.Architecture.X86_64: ("cdrom", "ide", "hda"),
@@ -8683,6 +9234,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([0, 1, 2, 3]))
+ def test_get_pcpu_available_for_power_mgmt(self, get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set and power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='2-3', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ pcpus = drvr._get_pcpu_available()
+ self.assertEqual(set([2, 3]), pcpus)
+
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([4, 5]))
+ def test_get_pcpu_available__cpu_dedicated_set_invalid_for_pm(self,
+ get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set but is invalid when power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='4-6', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set([0, 1, 2, 3]))
def test_get_vcpu_available(self, get_online_cpus):
@@ -8783,6 +9362,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta))
mock_fsthaw.assert_called_once_with()
+ def test_set_quiesced_agent_connection_fails(self):
+ # This is required to mock the guest host.
+ self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
+
+ with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
+ error = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "QEMU guest agent is not connected",
+ error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
+
+ mock_fsfreeze.side_effect = error
+ mock_fsfreeze.error_code = error.get_error_code()
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes", }})
+ self.assertRaises(exception.InstanceQuiesceFailed,
+ drvr._set_quiesced, self.context, instance, image_meta, True)
+
def test_create_snapshot_metadata(self):
base = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
@@ -9925,6 +10524,61 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# is called with the LUKSv1 payload offset taken into account.
block_device.resize.assert_called_once_with(new_size_minus_offset)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
+ @mock.patch('os_brick.encryptors.get_encryption_metadata')
+ def test_extend_volume_os_brick_block(self, mock_get_encryption_metadata,
+ mock_get_encryptor):
+ """Test extend volume that uses an os-brick encryptor."""
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ connection_info = {
+ 'serial': uuids.volume_id,
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': mock.sentinel.device_path,
+ 'access_mode': 'rw'
+ }
+ }
+
+ block_device = mock.Mock(spec=libvirt_guest.BlockDevice,
+ _disk=mock.sentinel.disk)
+ guest = mock.Mock(spec=libvirt_guest.Guest)
+ guest.get_block_device.return_value = block_device
+ guest.get_power_state.return_value = power_state.RUNNING
+
+ # The requested_size is provided to extend_volume in bytes.
+ new_size = 20 * units.Gi
+ # Decrypted volume size reported by os-brick will be smaller
+ new_size_minus_offset = new_size - (16384 * units.Ki)
+
+ mock_brick_extend = mock_get_encryptor.return_value.extend_volume
+ mock_brick_extend.return_value = new_size_minus_offset
+
+ drvr._host.get_guest = mock.Mock(return_value=guest)
+ drvr._extend_volume = mock.Mock(return_value=new_size)
+
+ encryption = {'provider': 'luks2', 'control_location': 'front-end'}
+ mock_get_encryption_metadata.return_value = encryption
+
+ # Extend the volume to new_size
+ drvr.extend_volume(self.context, connection_info, instance, new_size)
+
+ # Assert that the expected calls are made prior to the device resize.
+ drvr._host.get_guest.assert_called_once_with(instance)
+ guest.get_power_state.assert_called_once_with(drvr._host)
+ guest.get_block_device.assert_called_once_with(mock.sentinel.device_path)
+
+ # Assert calls to the os-brick encryptor extend
+ mock_get_encryptor.assert_called_once_with(connection_info, encryption)
+ mock_brick_extend.assert_called_once_with(self.context, **encryption)
+
+ mock_get_encryption_metadata.assert_called_once_with(
+ self.context, drvr._volume_api, uuids.volume_id, connection_info)
+
+ # Assert that the libvirt call to resize the device within the
+ # instance uses the size reported by os-brick
+ block_device.resize.assert_called_once_with(new_size_minus_offset)
+
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_use_encryptor_connection_info_incomplete(self,
@@ -10606,11 +11260,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(drvr._uri(), testuri)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -10645,11 +11299,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file,
):
@@ -10685,11 +11339,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -10722,12 +11376,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file,
):
@@ -10754,12 +11408,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
str(result.serial_listen_addr))
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU',
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file,
@@ -10775,7 +11429,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsNone(result.serial_listen_addr)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
@@ -10808,7 +11462,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
result.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
+ new=mock.Mock(return_value=False))
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_create_shared_storage_test_file',
+ return_value='fake')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
+ def test_check_can_live_migrate_guest_cpu_none_model_skip_compare(
+ self, mock_cpu, mock_test_file):
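+ # With the [workarounds]skip_cpu_compare_on_dest option enabled,
+ # the destination should not attempt a CPU comparison at all.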
+ self.flags(group='workarounds', skip_cpu_compare_on_dest=True)
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
+ instance_ref.vcpu_model.model = None
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
+ drvr.check_can_live_migrate_destination(
+ self.context, instance_ref, compute_info, compute_info)
+ mock_cpu.assert_not_called()
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
@@ -10827,7 +11500,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(result.dst_supports_numa_live_migration)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
@@ -10844,11 +11517,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertNotIn('dst_supports_numa_live_migration', result)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file,
):
@@ -10885,11 +11558,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file,
):
@@ -10915,7 +11588,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(return_value.dst_wants_file_backed_memory)
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
@@ -10930,7 +11603,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
compute_info, compute_info, False)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=True))
@mock.patch.object(
libvirt_driver.LibvirtDriver,
@@ -10951,7 +11624,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for vif in result.vifs:
self.assertTrue(vif.supports_os_vif_delegation)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
@@ -10961,7 +11634,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
@@ -10998,7 +11671,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_AARCH64_CPU_COMPARE))
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
def test_compare_cpu_host_aarch64(self,
mock_compare,
mock_get_libversion,
@@ -11021,7 +11694,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_compare.assert_called_once_with(caps.host.cpu.to_xml())
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
@@ -11040,7 +11713,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
@@ -11052,7 +11725,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
jsonutils.dumps(_fake_cpu_info),
instance)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
@@ -11247,7 +11920,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'_check_shared_storage_test_file')
def _test_check_can_live_migrate_source_block_migration_none(
self, block_migrate, is_shared_instance_path, is_share_block,
- mock_check, mock_shared_block, mock_enough, mock_verson):
+ mock_check, mock_shared_block, mock_enough, mock_version):
mock_check.return_value = is_shared_instance_path
mock_shared_block.return_value = is_share_block
@@ -11283,13 +11956,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_assert_dest_node_has_enough_disk')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migration_source_disk_over_commit_none(self,
- mock_check, mock_shared_block, mock_enough, mock_disk_check):
+ mock_check, mock_shared_block, mock_disk_check):
mock_check.return_value = False
mock_shared_block.return_value = False
@@ -11507,7 +12178,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_migrateToURI3,
mock_min_version):
self.compute = manager.ComputeManager()
- instance_ref = self.test_instance
+ instance_ref = objects.Instance(**self.test_instance)
target_connection = '127.0.0.2'
xml_tmpl = ("<domain type='kvm'>"
@@ -12187,7 +12858,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get,
mock_min_version):
self.compute = manager.ComputeManager()
- instance_ref = self.test_instance
+ instance_ref = objects.Instance(**self.test_instance)
target_connection = '127.0.0.2'
xml_tmpl = ("<domain type='kvm'>"
@@ -12477,7 +13148,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_min_version):
# Prepare data
self.compute = manager.ComputeManager()
- instance_ref = self.test_instance
+ instance_ref = objects.Instance(**self.test_instance)
target_connection = '127.0.0.2'
disk_paths = ['vda', 'vdb']
@@ -13485,6 +14156,85 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main_monitoring_failed(self):
self._test_live_migration_main(mon_side_effect=Exception)
+ @mock.patch.object(host.Host, "get_connection", new=mock.Mock())
+ @mock.patch.object(utils, "spawn", new=mock.Mock())
+ @mock.patch.object(host.Host, "get_guest")
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths")
+ def _test_live_migration_monitor_job_stats_exception(
+ self, exc, mock_copy_disk_paths, mock_get_guest, expect_success=True
+ ):
+ # Verify behavior when various exceptions are raised inside
+ # Guest.get_job_info() during live migration monitoring.
+ mock_domain = mock.Mock(fakelibvirt.virDomain)
+ guest = libvirt_guest.Guest(mock_domain)
+ mock_get_guest.return_value = guest
+
+ # First, raise the exception from jobStats(), then return "completed"
+ # to make sure we exit the monitoring loop.
+ guest._domain.jobStats.side_effect = [
+ exc,
+ {'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED},
+ ]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ post_method = mock.Mock()
+ migrate_data = mock.Mock()
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_paths.return_value = disks_to_copy
+
+ func = drvr._live_migration
+ args = (self.context, instance, mock.sentinel.dest, post_method,
+ mock.sentinel.recover_method, mock.sentinel.block_migration,
+ migrate_data)
+
+ if expect_success:
+ func(*args)
+ post_method.assert_called_once_with(
+ self.context, instance, mock.sentinel.dest,
+ mock.sentinel.block_migration, migrate_data
+ )
+ else:
+ actual_exc = self.assertRaises(
+ fakelibvirt.libvirtError, func, *args)
+ self.assertEqual(exc, actual_exc)
+
+ def test_live_migration_monitor_job_stats_no_domain(self):
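+ # VIR_ERR_NO_DOMAIN while polling job stats means the domain has
+ # already gone away, so monitoring should treat it as non-fatal.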
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'no domain',
+ error_code=fakelibvirt.VIR_ERR_NO_DOMAIN
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_op_invalid(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'operation invalid',
+ error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_no_ram_info_set(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'internal error',
+ error_message='migration was active, but no RAM info was set',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_internal_error(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ 'some other internal error',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=False)
+
@mock.patch('os.path.exists', return_value=False)
@mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -13504,7 +14254,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_fetch.assert_called_once_with(self.context, instance,
fallback_from_host=None)
mock_create.assert_called_once_with(
- disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
+ '/fake/instance/dir/foo',
+ disk_info['type'],
+ disk_info['virt_disk_size'],
+ )
mock_exists.assert_called_once_with('/fake/instance/dir/foo')
def test_create_images_and_backing_qcow2(self):
@@ -13536,7 +14289,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context, instance,
"/fake/instance/dir", disk_info)
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_images_not_exist_fallback(
self, mock_utime, mock_create_cow_image):
@@ -13616,7 +14369,72 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_utime.assert_called()
mock_create_cow_image.assert_called_once_with(
- backfile_path, '/fake/instance/dir/disk_path', virt_disk_size)
+ '/fake/instance/dir/disk_path',
+ 'qcow2',
+ virt_disk_size,
+ backing_file=backfile_path,
+ )
+
+ @mock.patch('nova.virt.libvirt.imagebackend.Image.exists',
+ new=mock.Mock(return_value=True))
+ def test_create_images_backing_images_and_fallback_not_exist(self):
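+ # fetch_image fails with ImageNotFound for every image here, so the
+ # kernel and ramdisk should be copied from the fallback host instead.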
+ self.flags(images_type='raw', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ base_dir = os.path.join(CONF.instances_path,
+ CONF.image_cache.subdirectory_name)
+ self.test_instance.update({
+ 'user_id': 'fake-user',
+ 'os_type': None,
+ 'kernel_id': uuids.kernel_id,
+ 'ramdisk_id': uuids.ramdisk_id,
+ 'project_id': 'fake-project'
+ })
+ instance = objects.Instance(**self.test_instance)
+
+ backing_file = imagecache.get_cache_fname(instance.image_ref)
+ backfile_path = os.path.join(base_dir, backing_file)
+ disk_size = 10747904
+ virt_disk_size = 25165824
+ disk_info = [{
+ 'backing_file': backing_file,
+ 'disk_size': disk_size,
+ 'path': 'disk_path',
+ 'type': 'raw',
+ 'virt_disk_size': virt_disk_size
+ }]
+
+ with test.nested(
+ mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
+ mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
+ side_effect=exception.ImageNotFound(
+ image_id=uuids.fake_id)),
+ mock.patch.object(imagebackend.Flat, 'resize_image'),
+ ) as (copy_image_mock, fetch_image_mock, resize_image_mock):
+ conn._create_images_and_backing(
+ self.context, instance, "/fake/instance/dir", disk_info,
+ fallback_from_host="fake_host")
+ kernel_path = os.path.join(CONF.instances_path,
+ self.test_instance['uuid'], 'kernel')
+ ramdisk_path = os.path.join(CONF.instances_path,
+ self.test_instance['uuid'], 'ramdisk')
+ copy_image_mock.assert_has_calls([
+ mock.call(dest=kernel_path, src=kernel_path,
+ host='fake_host', receive=True),
+ mock.call(dest=ramdisk_path, src=ramdisk_path,
+ host='fake_host', receive=True)
+ ])
+ fetch_image_mock.assert_has_calls([
+ mock.call(context=self.context,
+ target=backfile_path,
+ image_id=self.test_instance['image_ref'],
+ trusted_certs=None),
+ mock.call(self.context, kernel_path, instance.kernel_id,
+ None),
+ mock.call(self.context, ramdisk_path, instance.ramdisk_id,
+ None)
+ ])
+ resize_image_mock.assert_called_once_with(virt_disk_size)
@mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.NonCallableMock())
@@ -13648,7 +14466,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(mock_fetch_image.called)
@mock.patch('nova.privsep.path.utime')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_images_and_backing_ephemeral_gets_created(
self, mock_create_cow_image, mock_utime):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -13701,14 +14519,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# TODO(efried): Should these be disk_info[path]??
mock_create_cow_image.assert_has_calls([
mock.call(
- root_backing,
CONF.instances_path + '/disk',
- disk_info_byname['disk']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk']['virt_disk_size'],
+ backing_file=root_backing,
),
mock.call(
- ephemeral_backing,
CONF.instances_path + '/disk.local',
- disk_info_byname['disk.local']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk.local']['virt_disk_size'],
+ backing_file=ephemeral_backing,
),
])
@@ -14523,7 +15343,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('os.path.getsize')
def test_get_instance_disk_info_no_bdinfo_passed(self, mock_get_size,
mock_stat):
- # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
+ # NOTE(ndipanov): _get_disk_overcommitted_size_total calls this method
# without access to Nova's block device information. We want to make
# sure that we guess volumes mostly correctly in that case as well
instance = objects.Instance(**self.test_instance)
@@ -14566,6 +15386,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_stat.assert_called_once_with(path)
mock_get_size.assert_called_once_with(path)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_with_network_info(self, power_on=True):
def fake_getLibVersion():
return fakelibvirt.FAKE_LIBVIRT_VERSION
@@ -14700,6 +15524,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_build_device_metadata.assert_called_once_with(self.context,
instance)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_spawn_power_on_false(self):
self.test_spawn_with_network_info(power_on=False)
@@ -14726,6 +15551,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
accel_info=accel_info, power_on=False)
return instance
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
def test_spawn_accels_no_accel_info(self, mock_get_guest_xml):
# accel_info should be passed to get_guest_xml even if it is []
@@ -14736,6 +15564,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info=None, mdevs=mock.ANY,
accel_info=[])
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
def test_spawn_accels_with_accel_info(self, mock_get_guest_xml):
# accel_info should be passed to get_guest_xml if it is not []
@@ -14746,6 +15577,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info=None, mdevs=mock.ANY,
accel_info=accel_info)
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
# Methods called directly by spawn()
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -14793,6 +15627,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
config_disk.import_file.assert_called_once_with(instance, mock.ANY,
'disk.config')
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_without_image_meta(self):
instance_ref = self.test_instance
instance_ref['image_ref'] = uuids.image_ref
@@ -14817,6 +15654,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(['disk', 'disk.local'],
sorted(backend.created_disks.keys()))
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def _test_spawn_disks(self, image_ref, block_device_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -14877,6 +15717,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# We should have created the root and ephemeral disks
self.assertEqual(['disk', 'disk.local'], disks_created)
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_lxc_from_volume(self):
self.flags(virt_type="lxc",
group='libvirt')
@@ -14967,6 +15810,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
inst_obj.system_metadata.get(
'rootfs_device_name'))
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_with_pci_devices(self):
class FakeLibvirtPciDevice(object):
def dettach(self):
@@ -15011,6 +15857,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=mock_connection):
drvr.spawn(self.context, instance, image_meta, [], None, {})
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
@mock.patch('nova.crypto.ensure_vtpm_secret')
@mock.patch.object(hardware, 'get_vtpm_constraint')
@mock.patch(
@@ -15333,8 +16182,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY,
specified_fs=None)
- @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
- def test_create_image_resize_snap_backend(self, mock_cache):
+ def test_create_image_resize_snap_backend(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.task_state = task_states.RESIZE_FINISH
@@ -15362,7 +16210,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.privsep.path.utime')
@mock.patch('nova.virt.libvirt.utils.fetch_image')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_ephemeral_specified_fs_not_valid(
self, mock_create_cow_image, mock_fetch_image, mock_utime):
CONF.set_override('default_ephemeral_format', 'ext4')
@@ -15378,10 +16226,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- image_meta)
- disk_info['mapping'].pop('disk.local')
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta,
+ block_device_info=block_device_info)
with test.nested(
mock.patch('oslo_concurrency.processutils.execute'),
@@ -15793,9 +16640,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warning')
- @mock.patch('nova.compute.utils.get_machine_ips')
- def test_check_my_ip(self, mock_ips, mock_log):
- mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
+ def test_check_my_ip(self, mock_log):
+
+ self.libvirt.mock_get_machine_ips.return_value = [
+ '8.8.8.8', '75.75.75.75']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._check_my_ip()
mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
@@ -15805,7 +16653,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'my_ip': mock.ANY})
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_init_host_checks_ip(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr, '_check_my_ip') as mock_check:
@@ -15816,6 +16665,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -15823,8 +16673,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
@@ -15839,6 +16687,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -15846,8 +16695,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -15861,16 +16708,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(service_mock.disabled)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_service_resume_after_broken_connection(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = True
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
return_value=mock.MagicMock()),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16029,7 +16876,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.return_value = fake_guest
self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
- self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])
drvr.reboot(None, instance, [], 'SOFT')
@@ -16041,14 +16887,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
self.assertEqual(2, mock_get.call_count)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
- mock_sleep, mock_loopingcall,
- mock_get_instance_pci_devs):
+ mock_sleep, mock_loopingcall):
class FakeLoopingCall(object):
def start(self, *a, **k):
return self
@@ -16076,7 +16920,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_domain.return_value = mock_domain
mock_hard_reboot.side_effect = fake_hard_reboot
mock_loopingcall.return_value = FakeLoopingCall()
- mock_get_instance_pci_devs.return_value = []
drvr.reboot(None, instance, [], 'SOFT')
self.assertTrue(self.reboot_hard_reboot_called)
@@ -16274,7 +17117,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
- @mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
@@ -16291,7 +17133,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_guest_config, mock_get_instance_path,
mock_get_instance_disk_info, mock_create_images_and_backing,
mock_create_domand_and_network,
- mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
+ mock_looping_call, mock_ensure_tree):
"""For a hard reboot, we shouldn't need an additional call to glance
to get the image metadata.
@@ -16337,10 +17179,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(conn, '_detach_mediated_devices')
@mock.patch.object(conn, '_detach_direct_passthrough_ports')
@mock.patch.object(conn, '_detach_pci_devices')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='pci devs')
@mock.patch.object(conn._host, 'get_guest', return_value=guest)
- def suspend(mock_get_guest, mock_get_instance_pci_devs,
+ def suspend(mock_get_guest,
mock_detach_pci_devices,
mock_detach_direct_passthrough_ports,
mock_detach_mediated_devices,
@@ -16483,15 +17323,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
- @mock.patch.object(host.Host,
- 'has_min_version', return_value=True)
- def _test_detach_direct_passthrough_ports(self,
- mock_has_min_version, vif_type):
+ @mock.patch.object(
+ host.Host, 'has_min_version', new=mock.Mock(return_value=True)
+ )
+ def _test_detach_direct_passthrough_ports(
+ self, vif_type, detach_device=True,
+ vnic_type=network_model.VNIC_TYPE_DIRECT):
instance = objects.Instance(**self.test_instance)
expected_pci_slot = "0000:00:00.0"
network_info = _fake_network_info(self)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ network_info[0]['vnic_type'] = vnic_type
# some more adjustments to the fake network_info so that
# the correct get_config function will be executed (the vif's
# get_config_hw_veb, matching a real SR-IOV vif)
@@ -16504,32 +17346,55 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_pci_devices will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
expected_pci_device_obj = (
- objects.PciDevice(address=expeted_pci_slot, request_id=None))
+ objects.PciDevice(
+ address=expected_pci_slot, request_id=None, compute_node_id=42
+ )
+ )
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [expected_pci_device_obj]
- domain = FakeVirtDomain()
+ domain = FakeVirtDomain(id=24601, name='Jean Valjean')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
- with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
+ with mock.patch.object(
+ drvr, '_detach_pci_devices'
+ ) as mock_detach_pci, mock.patch.object(
+ drvr, 'detach_interface'
+ ) as mock_detach_interface:
drvr._detach_direct_passthrough_ports(
self.context, instance, guest)
- mock_detach_pci.assert_called_once_with(
- guest, [expected_pci_device_obj])
+ if detach_device:
+ mock_detach_pci.assert_called_once_with(
+ guest, [expected_pci_device_obj])
+ else:
+ mock_detach_interface.assert_called_once()
- def test_detach_direct_passthrough_ports_interface_interface_hostdev(self):
+ def test_detach_direct_passthrough_ports_ovs_hw_offload(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestInterface
- self._test_detach_direct_passthrough_ports(vif_type="hw_veb")
+ self._test_detach_direct_passthrough_ports("ovs", detach_device=False)
- def test_detach_direct_passthrough_ports_interface_pci_hostdev(self):
+ def test_detach_direct_passthrough_ports_sriov_nic_agent(self):
+ # Note: test detach_direct_passthrough_ports method for vif with config
+ # LibvirtConfigGuestInterface
+ self._test_detach_direct_passthrough_ports(
+ "hw_veb", detach_device=False
+ )
+
+ def test_detach_direct_physical_passthrough_ports_sriov_nic_agent(self):
+ self._test_detach_direct_passthrough_ports(
+ "hostdev_physical",
+ vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL
+ )
+
+ def test_detach_direct_passthrough_ports_infiniband(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestHostdevPCI
- self._test_detach_direct_passthrough_ports(vif_type="ib_hostdev")
+ self._test_detach_direct_passthrough_ports("ib_hostdev")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@@ -16539,9 +17404,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
network_info = _fake_network_info(self, 2)
+ direct_physical = network_model.VNIC_TYPE_DIRECT_PHYSICAL
for network_info_inst in network_info:
- network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- network_info_inst['type'] = "hw_veb"
+ network_info_inst['vnic_type'] = direct_physical
+ network_info_inst['type'] = "hostdev_physical"
network_info_inst['details'] = dict(vlan="2145")
network_info_inst['address'] = "fa:16:3e:96:2a:48"
@@ -16551,7 +17417,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_instance_pci_devs will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [
@@ -16606,8 +17472,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr, '_create_guest_with_network',
return_value=guest),
mock.patch.object(drvr, '_attach_pci_devices'),
- mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='fake_pci_devs'),
+ mock.patch('nova.objects.Instance.get_pci_devices',
+ return_value='fake_pci_devs'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(guest, 'sync_guest_time'),
mock.patch.object(drvr, '_wait_for_running',
@@ -17358,12 +18224,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
got = drvr._get_cpu_info()
self.assertEqual(want, got)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
@mock.patch.object(host.Host, 'list_pci_devices',
return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7'])
- def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname):
+ def test_get_pci_passthrough_devices(self, mock_list):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -17397,7 +18262,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"vendor_id": '8086',
"dev_type": fields.PciDeviceType.SRIOV_PF,
"phys_function": None,
- "numa_node": None},
+ "numa_node": None,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
+ },
{
"dev_id": "pci_0000_04_10_7",
"domain": 0,
@@ -17433,7 +18301,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# The first call for every VF is to determine parent_ifname and
# the second call to determine the MAC address.
- mock_get_ifname.assert_has_calls([
+ pci_utils.get_ifname_by_pci_address.assert_has_calls([
mock.call('0000:04:10.7', pf_interface=True),
mock.call('0000:04:11.7', pf_interface=True),
])
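
The two pf_interface=True calls asserted above correspond to a per-VF
lookup pattern. A minimal sketch of that pattern, assuming the
nova.pci.utils helpers used elsewhere in these tests (the wrapper
function itself is hypothetical):

    from nova.pci import utils as pci_utils

    def vf_network_metadata(vf_addr):
        # First resolve the parent PF's netdev name for the VF ...
        parent_ifname = pci_utils.get_ifname_by_pci_address(
            vf_addr, pf_interface=True)
        # ... then read the PF MAC address to report alongside the VF.
        pf_mac = pci_utils.get_mac_by_pci_address(
            vf_addr, pf_interface=True)
        return parent_ifname, pf_mac
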
@@ -19198,8 +20066,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**self.test_instance)
instance.vm_state = vm_states.BUILDING
- vifs = [{'id': uuids.vif_1, 'active': False},
- {'id': uuids.vif_2, 'active': False}]
+ vifs = [
+ network_model.VIF(id=uuids.vif_1, active=False),
+ network_model.VIF(id=uuids.vif_2, active=False)
+ ]
@mock.patch.object(drvr, 'plug_vifs')
@mock.patch.object(drvr, '_create_guest')
@@ -19396,6 +20266,23 @@ class LibvirtConnTestCase(test.NoDBTestCase,
events = drvr._get_neutron_events(network_info)
self.assertEqual([('network-vif-plugged', '1')], events)
+ def test_get_neutron_events_remote_managed(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = [
+ network_model.VIF(
+ id=uuids.vif_1,
+ vnic_type=network_model.VNIC_TYPE_REMOTE_MANAGED),
+ network_model.VIF(
+ id=uuids.vif_2,
+ vnic_type=network_model.VNIC_TYPE_REMOTE_MANAGED,
+ active=True),
+ ]
+ events = drvr._get_neutron_events(network_info)
+ # For VNIC_TYPE_REMOTE_MANAGED, events are currently bind-time only.
+ # Until this changes, they need to be filtered out to avoid waiting
+ # for them unnecessarily.
+ self.assertEqual([], events)
+
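
The new test above pins down the filtering behaviour for remote-managed
ports. A hedged sketch of that logic, using plain dicts in place of
network_model.VIF objects (the constant value mirrors
network_model.VNIC_TYPE_REMOTE_MANAGED):

    VNIC_TYPE_REMOTE_MANAGED = 'remote-managed'

    def get_neutron_events(network_info):
        # Remote-managed ports only generate bind-time events today,
        # so they are skipped rather than waited on during plug.
        return [
            ('network-vif-plugged', vif['id'])
            for vif in network_info
            if not vif.get('active', True)
            and vif.get('vnic_type') != VNIC_TYPE_REMOTE_MANAGED
        ]
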
def test_unplug_vifs_ignores_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
@@ -19827,11 +20714,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr._conn, 'defineXML', create=True),
mock.patch('nova.virt.libvirt.utils.get_disk_size'),
mock.patch('nova.virt.libvirt.utils.get_disk_backing_file'),
- mock.patch('nova.virt.libvirt.utils.create_cow_image'),
+ mock.patch('nova.virt.libvirt.utils.create_image'),
mock.patch('nova.virt.libvirt.utils.extract_snapshot'),
- mock.patch.object(drvr, '_set_quiesced')
+ mock.patch.object(drvr, '_set_quiesced'),
+ mock.patch.object(drvr, '_can_quiesce')
) as (mock_define, mock_size, mock_backing, mock_create_cow,
- mock_snapshot, mock_quiesce):
+ mock_snapshot, mock_quiesce, mock_can_quiesce):
xmldoc = "<domain/>"
srcfile = "/first/path"
@@ -19846,7 +20734,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = libvirt_guest.Guest(mock_dom)
if not can_quiesce:
- mock_quiesce.side_effect = (
+ mock_can_quiesce.side_effect = (
exception.InstanceQuiesceNotSupported(
instance_id=self.test_instance['id'], reason='test'))
@@ -19869,7 +20757,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_size.assert_called_once_with(srcfile, format="qcow2")
mock_backing.assert_called_once_with(srcfile, basename=False,
format="qcow2")
- mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_create_cow.assert_called_once_with(
+ dltfile, 'qcow2', 1004009, backing_file=bckfile)
mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
@@ -19877,6 +20766,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_define.assert_called_once_with(xmldoc)
mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
mock.ANY, True)
+
if can_quiesce:
mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
mock.ANY, False)
@@ -19980,7 +20870,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
driver.init_host, 'wibble')
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_VIRTUOZZO_VERSION))
@@ -20101,7 +20992,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 0,
'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/sda'}))
+ 'device_name': '/dev/sda', 'boot_index': 0}))
info = {'block_device_mapping': driver_block_device.convert_volumes(
[bdm])}
info['block_device_mapping'][0]['connection_info'] = conn_info
@@ -20211,8 +21102,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_disk_config_image_type())
self.assertEqual(2, drvr.image_backend.by_name.call_count)
- call1 = mock.call(instance, 'disk.config', 'rbd')
- call2 = mock.call(instance, 'disk.config', 'flat')
+ call1 = mock.call(instance, 'disk.config', 'rbd',
+ disk_info_mapping=disk_mapping['disk.config'])
+ call2 = mock.call(instance, 'disk.config', 'flat',
+ disk_info_mapping=disk_mapping['disk.config'])
drvr.image_backend.by_name.assert_has_calls([call1, call2])
self.assertEqual(mock.sentinel.diskconfig, diskconfig)
@@ -20255,7 +21148,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = mock.Mock()
with test.nested(
- mock.patch.object(pci_manager, 'get_instance_pci_devs'),
mock.patch.object(drvr, '_attach_pci_devices'),
mock.patch.object(drvr, '_attach_direct_passthrough_ports'),
):
@@ -20691,7 +21583,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'reserved': 0,
},
orc.PCPU: {
@@ -20707,7 +21599,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'reserved': 512,
},
orc.DISK_GB: {
@@ -21435,6 +22327,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.flags(sysinfo_serial="none", group="libvirt")
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
os_vif.initialize()
self.drvr = libvirt_driver.LibvirtDriver(
@@ -21530,6 +22423,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
context.get_admin_context(), ins_ref, '10.0.0.2',
flavor_obj, None)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_cleanup_failed_instance_base')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unplug_vifs')
@mock.patch('nova.virt.libvirt.utils.save_and_migrate_vtpm_dir')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
@@ -21546,7 +22441,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self, ctxt, flavor_obj, mock_execute, mock_exists, mock_rename,
mock_is_shared, mock_get_host_ip, mock_destroy,
mock_get_disk_info, mock_vtpm, mock_unplug_vifs,
- block_device_info=None, params_for_instance=None):
+ mock_cleanup, block_device_info=None, params_for_instance=None):
"""Test for nova.virt.libvirt.driver.LivirtConnection
.migrate_disk_and_power_off.
"""
@@ -21561,6 +22456,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
ctxt, instance, '10.0.0.2', flavor_obj, None,
block_device_info=block_device_info)
+ mock_cleanup.assert_called_once()
+ mock_cleanup.reset_mock()
self.assertEqual(out, disk_info_text)
mock_vtpm.assert_called_with(
instance.uuid, mock.ANY, mock.ANY, '10.0.0.2', mock.ANY, mock.ANY)
@@ -21571,6 +22468,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
ctxt, instance, '10.0.0.1', flavor_obj, None,
block_device_info=block_device_info)
+ mock_cleanup.assert_called_once()
self.assertEqual(out, disk_info_text)
mock_vtpm.assert_called_with(
instance.uuid, mock.ANY, mock.ANY, '10.0.0.1', mock.ANY, mock.ANY)
@@ -21816,11 +22714,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_instance_disk_info')
@mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
- def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
- mock_get_disk_info):
+ def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get):
mappings = [
{
'device_name': '/dev/sdb4',
@@ -21867,7 +22762,6 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# Old flavor, eph is 20, real disk is 3, target is 2, fail
flavor = {'root_gb': 10, 'ephemeral_gb': 2}
flavor_obj = objects.Flavor(**flavor)
- mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(
exception.InstanceFaultRollback,
@@ -22468,8 +23362,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertFalse(drvr.image_backend.remove_snap.called)
@mock.patch.object(shutil, 'rmtree')
- def test_cleanup_failed_migration(self, mock_rmtree):
- self.drvr._cleanup_failed_migration('/fake/inst')
+ def test_cleanup_failed_instance_base(self, mock_rmtree):
+ self.drvr._cleanup_failed_instance_base('/fake/inst')
mock_rmtree.assert_called_once_with('/fake/inst')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_cleanup_resize')
@@ -22828,6 +23722,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
instance = self._create_instance(params=inst_params)
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': instance.image_ref}
instance_dir = libvirt_utils.get_instance_path(instance)
disk_path = os.path.join(instance_dir, 'disk')
@@ -22847,7 +23744,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
]
drvr._create_and_inject_local_root(
- self.context, instance, False, '', disk_images, None, None)
+ self.context, instance, disk_info['mapping'], False, '',
+ disk_images, None, None)
mock_fetch_calls = [
mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
@@ -22930,9 +23828,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# config_drive is True by default, configdrive.required_by()
# returns True
instance_ref = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
disk_images = {'image_id': None}
- drvr._create_and_inject_local_root(self.context, instance_ref, False,
+ drvr._create_and_inject_local_root(self.context, instance_ref,
+ disk_info['mapping'], False,
'', disk_images, get_injection_info(),
None)
self.assertFalse(mock_inject.called)
@@ -22952,6 +23854,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_image.get.return_value = {'locations': [], 'disk_format': 'raw'}
instance = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': 'foo'}
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -22962,6 +23867,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_fetch.reset_mock()
drvr._create_and_inject_local_root(self.context,
instance,
+ disk_info['mapping'],
False,
'',
disk_images,
@@ -23855,7 +24761,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_retry_succeeds(
self, state, mock_event_wait
):
- """Test that that a live detach times out while waiting for the libvirt
+ """Test that a live detach times out while waiting for the libvirt
event but then the retry succeeds.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -23910,7 +24816,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_retry_unplug_in_progress(
self, mock_event_wait
):
- """Test that that a live detach times out while waiting for the libvirt
+ """Test that a live detach times out while waiting for the libvirt
event but then the retry gets an 'unplug already in progress' error from
libvirt, which it ignores, then the detach finishes and the event is
received.
@@ -23990,10 +24896,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_run_out_of_retries(
self, state, mock_event_wait
):
- """Test that that a live detach times out while waiting for the libvirt
+ """Test that a live detach times out while waiting for the libvirt
event at every attempt so the driver runs out of retry attempts.
"""
- # decreased the number to simplyfy the test
+ # decreased the number to simplify the test
self.flags(group='libvirt', device_detach_attempts=2)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -24321,7 +25227,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue and
# disk, in that order
@@ -24393,7 +25299,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue, disk, and
# disk.config.rescue in that order
@@ -24631,7 +25537,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
- 'device_name': '/dev/vda'}))
+ 'device_name': '/dev/vda',
+ 'boot_index': 0}))
bdms = driver_block_device.convert_volumes([bdm])
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
@@ -25317,9 +26224,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
self._test_get_gpu_inventories(drvr, expected, ['nvidia-11'])
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_mdev_capable_devices')
- def test_get_gpu_inventories_with_two_types(self, get_mdev_capable_devs):
+ def test_get_gpu_inventories_with_two_types(self):
self.flags(enabled_mdev_types=['nvidia-11', 'nvidia-12'],
group='devices')
# we need to call the below again to ensure the updated
@@ -25952,7 +26857,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
drvr._get_existing_mdevs_not_assigned(parent=None))
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch('nova.compute.utils.get_machine_ips',
new=mock.Mock(return_value=[]))
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
@@ -26668,12 +27574,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_execute.assert_called_once_with('qemu-img', 'rebase',
'-b', '', 'disk')
+ @mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.instance.InstanceList.get_by_host')
@mock.patch('nova.virt.libvirt.host.Host.get_hostname',
new=mock.Mock(return_value=mock.sentinel.hostname))
@mock.patch('nova.context.get_admin_context', new=mock.Mock())
def test_register_machine_type_already_registered_image_metadata(
- self, mock_get_by_host
+ self, mock_get_by_host, mock_instance_save,
):
instance = self._create_instance(
params={
@@ -26683,7 +27590,14 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
)
mock_get_by_host.return_value = [instance]
- self.drvr._register_instance_machine_type()
+
+ # We only care about hw_machine_type for this test
+ with mock.patch(
+ 'nova.virt.libvirt.driver.REGISTER_IMAGE_PROPERTY_DEFAULTS',
+ ['hw_machine_type']
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
# Assert that we don't overwrite the existing type
self.assertEqual(
'existing_type',
@@ -26693,6 +27607,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'existing_type',
instance.system_metadata.get('image_hw_machine_type')
)
+ mock_instance_save.assert_not_called()
@mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.instance.InstanceList.get_by_host')
@@ -26705,7 +27620,14 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
):
instance = self._create_instance()
mock_get_by_host.return_value = [instance]
- self.drvr._register_instance_machine_type()
+
+ # We only care about hw_machine_type for this test
+ with mock.patch(
+ 'nova.virt.libvirt.driver.REGISTER_IMAGE_PROPERTY_DEFAULTS',
+ ['hw_machine_type']
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
mock_instance_save.assert_called_once()
self.assertEqual(
'conf_type',
@@ -26716,6 +27638,172 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
instance.system_metadata.get('image_hw_machine_type')
)
+ @mock.patch('nova.virt.libvirt.driver.LOG.exception')
+ @mock.patch('nova.objects.instance.InstanceList.get_by_host')
+ @mock.patch('nova.virt.libvirt.host.Host.get_hostname', new=mock.Mock())
+ def test_register_all_undefined_details_unknown_failure(
+ self, mock_get_by_host, mock_log_exc
+ ):
+ instance = self._create_instance()
+ mock_get_by_host.return_value = [instance]
+
+ # Assert that we swallow anything raised below us
+ with mock.patch.object(
+ self.drvr,
+ '_register_undefined_instance_details',
+ side_effect=test.TestingException()
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
+ # Assert that we logged the failure
+ self.assertEqual(1, mock_log_exc.call_count)
+ self.assertIn('Ignoring unknown failure while attempting '
+ 'to save the defaults for unregistered image properties',
+ mock_log_exc.call_args.args[0])
+
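
The unknown-failure test above asserts swallow-and-log semantics. A
sketch of the loop consistent with it, under the assumption that the
per-instance helper may raise anything (the logger setup is
illustrative):

    import logging

    LOG = logging.getLogger(__name__)

    def register_all_undefined_instance_details(drvr, context, instances):
        for instance in instances:
            try:
                drvr._register_undefined_instance_details(
                    context, instance)
            except Exception:
                # One bad instance must not abort the host-wide pass.
                LOG.exception('Ignoring unknown failure while attempting '
                              'to save the defaults for unregistered '
                              'image properties')
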
+ @mock.patch('nova.virt.libvirt.driver.LOG.exception')
+ @mock.patch('nova.objects.instance.Instance.save')
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest', new=mock.Mock())
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList.'
+ 'get_by_instance_uuid')
+ @mock.patch('nova.objects.instance.InstanceList.get_by_host')
+ @mock.patch('nova.virt.libvirt.host.Host.get_hostname', new=mock.Mock())
+ def test_register_all_undefined_details_unknown_failure_finding_default(
+ self, mock_get_by_host, mock_get_bdms, mock_save, mock_log_exc
+ ):
+ instance = self._create_instance()
+ mock_get_by_host.return_value = [instance]
+ mock_get_bdms.return_value = []
+
+ # Assert that we swallow anything raised below us
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property',
+ side_effect=test.TestingException()
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
+ # Assert that we logged the failures (once for each unregistered
+ # image property)
+ self.assertEqual(len(libvirt_driver.REGISTER_IMAGE_PROPERTY_DEFAULTS),
+ mock_log_exc.call_count)
+ self.assertIn('Ignoring unknown failure while attempting '
+ 'to find the default of',
+ mock_log_exc.call_args.args[0])
+
+ # Assert that we updated the instance
+ mock_save.assert_called_once_with()
+
+ @mock.patch('nova.objects.instance.Instance.save',
+ new=mock.NonCallableMock())
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest',
+ new=mock.NonCallableMock())
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList.'
+ 'get_by_instance_uuid', new=mock.NonCallableMock())
+ def test_register_undefined_instance_details_nothing_to_register(self):
+ instance = self._create_instance()
+
+ # Set a value for all REGISTER_IMAGE_PROPERTY_DEFAULTS
+ for p in libvirt_driver.REGISTER_IMAGE_PROPERTY_DEFAULTS:
+ instance.system_metadata[f"image_{p}"] = 'foo'
+
+ # We should not have pulled bdms or updated the instance
+ self.drvr._register_undefined_instance_details(self.context, instance)
+
+ @mock.patch('nova.objects.instance.Instance.save')
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList.'
+ 'get_by_instance_uuid')
+ def test_register_undefined_instance_details_disk_info_and_guest_config(
+ self, mock_get_bdms, mock_get_guest, mock_save
+ ):
+ instance = self._create_instance()
+ mock_get_bdms.return_value = []
+
+ # Test all props unregistered
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property'
+ ) as mock_find:
+ self.drvr._register_undefined_instance_details(self.context,
+ instance)
+ # We should have pulled bdms
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ # We should have pulled disk_info
+ self.assertIsNotNone(mock_find.call_args.args[2])
+ # We should have pulled guest config
+ mock_get_guest.return_value.get_config.assert_called_once_with()
+ self.assertIsNotNone(mock_find.call_args.args[3])
+
+ # Set one of ['hw_disk_bus', 'hw_cdrom_bus']
+ # Set one of ['hw_pointer_model', 'hw_input_bus']
+ mock_get_bdms.reset_mock()
+ mock_get_guest.reset_mock()
+ instance.system_metadata['image_hw_disk_bus'] = 'scsi'
+ instance.system_metadata['image_hw_pointer_model'] = None
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property'
+ ) as mock_find:
+ self.drvr._register_undefined_instance_details(self.context,
+ instance)
+ # We should have pulled bdms
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ # We should have pulled disk_info
+ self.assertIsNotNone(mock_find.call_args.args[2])
+ # We should have pulled guest config
+ mock_get_guest.return_value.get_config.assert_called_once_with()
+ self.assertIsNotNone(mock_find.call_args.args[3])
+
+ # Set the other, now we have both ['hw_disk_bus', 'hw_cdrom_bus']
+ # Set the other, now we have both ['hw_pointer_model', 'hw_input_bus']
+ mock_get_bdms.reset_mock()
+ mock_get_guest.reset_mock()
+ instance.system_metadata['image_hw_cdrom_bus'] = 'scsi'
+ instance.system_metadata['image_hw_input_bus'] = None
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property'
+ ) as mock_find:
+ self.drvr._register_undefined_instance_details(self.context,
+ instance)
+ # We should not have pulled bdms at all
+ mock_get_bdms.assert_not_called()
+ # And disk_info should not have been pulled
+ self.assertIsNone(mock_find.call_args.args[2])
+ # We should not have pulled guest config
+ mock_get_guest.return_value.assert_not_called()
+ self.assertIsNone(mock_find.call_args.args[3])
+
+ def test_set_features_windows(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ guest = vconfig.LibvirtConfigGuest()
+ self.drvr._set_features(
+ guest, 'windows',
+ objects.ImageMeta(
+ properties=objects.ImageMetaProps()
+ ),
+ objects.Flavor(extra_specs={})
+ )
+ features = guest.features
+ hv = None
+ for feature in features:
+ if feature.root_name == 'hyperv':
+ hv = feature
+ self.assertIsNotNone(hv)
+ self.assertTrue(hv.relaxed)
+ self.assertTrue(hv.vapic)
+ self.assertTrue(hv.spinlocks)
+ self.assertEqual(8191, hv.spinlock_retries)
+ self.assertTrue(hv.vpindex)
+ self.assertTrue(hv.runtime)
+ self.assertTrue(hv.synic)
+ self.assertTrue(hv.reset)
+ self.assertTrue(hv.frequencies)
+ self.assertTrue(hv.reenlightenment)
+ self.assertTrue(hv.tlbflush)
+ self.assertTrue(hv.ipi)
+ self.assertTrue(hv.evmcs)
+
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
@@ -27786,7 +28874,7 @@ class _BaseSnapshotTests(test.NoDBTestCase):
@mock.patch.object(host.Host, '_get_domain')
@mock.patch('nova.virt.libvirt.utils.get_disk_size',
new=mock.Mock(return_value=0))
- @mock.patch('nova.virt.libvirt.utils.create_cow_image',
+ @mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
new=mock.Mock(return_value=None))
@@ -28112,13 +29200,11 @@ class LVMSnapshotTests(_BaseSnapshotTests):
new=mock.Mock(return_value=None))
@mock.patch('nova.virt.libvirt.utils.get_disk_type_from_path',
new=mock.Mock(return_value='lvm'))
- @mock.patch('nova.virt.libvirt.utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image')
@mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
def _test_lvm_snapshot(self, disk_format, mock_volume_info,
- mock_convert_image, mock_file_open):
+ mock_convert_image):
self.flags(images_type='lvm',
images_volume_group='nova-vg', group='libvirt')
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index 70d438d816..5b181b8f06 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_service import fixture as service_fixture
from oslo_utils import encodeutils
@@ -403,9 +404,21 @@ class GuestTestCase(test.NoDBTestCase):
self.assertIsNotNone(
self.guest.get_interface_by_cfg(
cfg, from_persistent_config=True))
+ cfg = vconfig.LibvirtConfigGuestInterface()
+ # NOTE(sean-k-mooney): a default-constructed object is not valid
+ # to pass to get_interface_by_cfg, so we just modify the xml to
+ # make it not match
+ cfg.parse_str("""
+ <interface type="wont_match">
+ <mac address="fa:16:3e:f9:af:ae"/>
+ <model type="virtio"/>
+ <driver name="qemu"/>
+ <source bridge="qbr84008d03-11"/>
+ <target dev="tap84008d03-11"/>
+ </interface>""")
self.assertIsNone(
self.guest.get_interface_by_cfg(
- vconfig.LibvirtConfigGuestInterface(),
+ cfg,
from_persistent_config=True))
self.domain.XMLDesc.assert_has_calls(
[
@@ -1040,3 +1053,25 @@ class JobInfoTestCase(test.NoDBTestCase):
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
+
+ @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
+ @mock.patch.object(fakelibvirt.virDomain, "jobStats")
+ def test_job_stats_no_ram(self, mock_stats, mock_info):
+ mock_stats.side_effect = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error: migration was active, but no RAM info was set",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ error_message="migration was active, but no RAM info was set")
+
+ info = self.guest.get_job_info()
+
+ self.assertIsInstance(info, libvirt_guest.JobInfo)
+ self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_NONE, info.type)
+ self.assertEqual(0, info.time_elapsed)
+ self.assertEqual(0, info.time_remaining)
+ self.assertEqual(0, info.memory_total)
+ self.assertEqual(0, info.memory_processed)
+ self.assertEqual(0, info.memory_remaining)
+
+ mock_stats.assert_called_once_with()
+ self.assertFalse(mock_info.called)
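
The no-RAM-info case maps a known libvirt race onto an idle JobInfo.
Roughly, the guarded call looks like the sketch below; the error
matching and the zeroed result are assumptions taken from the mocked
values above:

    import libvirt

    def get_job_stats(dom):
        try:
            return dom.jobStats()
        except libvirt.libvirtError as ex:
            if 'no RAM info was set' in ex.get_error_message():
                # The migration completed between our check and the
                # stats call; report "no job" with zeroed counters.
                return {'type': libvirt.VIR_DOMAIN_JOB_NONE}
            raise
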
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 192909d721..a76dc83105 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -15,11 +15,12 @@
# under the License.
import os
+from unittest import mock
+import ddt
import eventlet
from eventlet import greenthread
from eventlet import tpool
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -71,11 +72,10 @@ class HostTestCase(test.NoDBTestCase):
self.useFixture(nova_fixtures.LibvirtFixture())
self.host = host.Host("qemu:///system")
- @mock.patch("nova.virt.libvirt.host.Host._init_events")
- def test_repeat_initialization(self, mock_init_events):
+ def test_repeat_initialization(self):
for i in range(3):
self.host.initialize()
- mock_init_events.assert_called_once_with()
+ self.host._init_events.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback(self, mock_close):
@@ -1052,6 +1052,12 @@ Active: 8381604 kB
'iowait': 6121490000000},
stats)
+ @mock.patch.object(fakelibvirt.virConnect, "getCPUMap")
+ def test_get_available_cpus(self, mock_map):
+ mock_map.return_value = (4, [True, True, False, False], None)
+ result = self.host.get_available_cpus()
+ self.assertEqual(result, {0, 1, 2, 3})
+
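
Given the mocked return value, get_available_cpus evidently reports
every host CPU id regardless of online state. A plausible shape for the
helper (only the name comes from the test; the body is an assumption):

    def get_available_cpus(conn):
        # getCPUMap() returns (ncpus, online_bitmap, n_online); a 4-CPU
        # map with two offline CPUs still yields {0, 1, 2, 3}.
        total, _online, _count = conn.getCPUMap()
        return set(range(total))
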
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
fake_dom_xml = """
@@ -1095,6 +1101,48 @@ Active: 8381604 kB
guest = self.host.write_instance_config(fake_dom_xml)
self.assertIsInstance(guest, libvirt_guest.Guest)
+ def test_check_machine_type_invalid(self):
+ fake_dom_xml = u"""
+ <capabilities>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name="alpha">
+ <emulator>/usr/bin/qemu-system-alpha</emulator>
+ <machine maxCpus="4">q35</machine>
+ <machine maxCpus="1">integratorcp</machine>
+ <machine maxCpus="1">versatileab</machine>
+ <domain type="qemu"/>
+ </arch>
+ </guest>
+ </capabilities>
+ """
+
+ self.assertRaises(
+ exception.InvalidMachineType,
+ self.host._check_machine_type, fake_dom_xml, 'Q35'
+ )
+
+ def test_check_machine_type_valid(self):
+ fake_dom_xml = u"""
+ <capabilities>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name="alpha">
+ <emulator>/usr/bin/qemu-system-alpha</emulator>
+ <machine maxCpus="4">q35</machine>
+ <machine maxCpus="1">integratorcp</machine>
+ <machine maxCpus="1">versatileab</machine>
+ <domain type="qemu"/>
+ </arch>
+ </guest>
+ </capabilities>
+ """
+
+ self.assertIsNone(
+ self.host._check_machine_type(fake_dom_xml, 'q35'),
+ "None msg"
+ )
+
@mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
def test_device_lookup_by_name(self, mock_nodeDeviceLookupByName):
self.host.device_lookup_by_name("foo")
@@ -1113,13 +1161,14 @@ Active: 8381604 kB
expect_vf = ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan", "txvlan"]
self.assertEqual(expect_vf, actualvf)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- def test_get_pcidev_info_non_nic(self, mock_get_ifname):
+ def test_get_pcidev_info_non_nic(self):
+ pci_utils.get_mac_by_pci_address.side_effect = (
+ exception.PciDeviceNotFoundById('0000:04:00.3'))
dev_name = "pci_0000_04_11_7"
pci_dev = fakelibvirt.NodeDevice(
self.host._get_connection(),
xml=fake_libvirt_data._fake_NodeDevXml[dev_name])
- actual_vf = self.host._get_pcidev_info(dev_name, pci_dev, [], [])
+ actual_vf = self.host._get_pcidev_info(dev_name, pci_dev, [], [], [])
expect_vf = {
"dev_id": dev_name, "address": "0000:04:11.7",
"product_id": '1520', "numa_node": 0,
@@ -1128,14 +1177,15 @@ Active: 8381604 kB
'parent_addr': '0000:04:00.3',
}
self.assertEqual(expect_vf, actual_vf)
- mock_get_ifname.assert_not_called()
+ pci_utils.get_ifname_by_pci_address.assert_not_called()
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
- def test_get_pcidev_info(self, mock_get_ifname):
+ def test_get_pcidev_info(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
devs = {
"pci_0000_04_00_3", "pci_0000_04_10_7", "pci_0000_04_11_7",
- "pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1"
+ "pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1",
+ "pci_0000_82_00_0", "pci_0000_82_00_3", "pci_0001_82_00_3",
+ "pci_0002_82_00_3",
}
node_devs = {}
for dev_name in devs:
@@ -1150,11 +1200,13 @@ Active: 8381604 kB
xml=fake_libvirt_data._fake_NodeDevXml[child]))
net_devs = [
dev for dev in node_devs.values() if dev.name() not in devs]
+ pci_devs = [
+ dev for dev in node_devs.values() if dev.name() in devs]
name = "pci_0000_04_00_3"
- actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
- expect_vf = {
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], [])
+ expect_pf = {
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
@@ -1162,12 +1214,14 @@ Active: 8381604 kB
"vendor_id": '8086',
"label": 'label_8086_1521',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
name = "pci_0000_04_10_7"
actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
+ name, node_devs[name], net_devs, [], [])
expect_vf = {
"dev_id": "pci_0000_04_10_7",
"address": "0000:04:10.7",
@@ -1180,13 +1234,15 @@ Active: 8381604 kB
"parent_ifname": "ens1",
"capabilities": {
"network": ["rx", "tx", "sg", "tso", "gso", "gro",
- "rxvlan", "txvlan"]},
+ "rxvlan", "txvlan"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1}},
}
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_04_11_7"
actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
+ name, node_devs[name], net_devs, [], [])
expect_vf = {
"dev_id": "pci_0000_04_11_7",
"address": "0000:04:11.7",
@@ -1198,14 +1254,16 @@ Active: 8381604 kB
"parent_addr": '0000:04:00.3',
"capabilities": {
"network": ["rx", "tx", "sg", "tso", "gso", "gro",
- "rxvlan", "txvlan"]},
+ "rxvlan", "txvlan"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1}},
"parent_ifname": "ens1",
}
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_04_00_1"
actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
+ name, node_devs[name], net_devs, [], [])
expect_vf = {
"dev_id": "pci_0000_04_00_1",
"address": "0000:04:00.1",
@@ -1218,9 +1276,9 @@ Active: 8381604 kB
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_03_00_0"
- actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
- expect_vf = {
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], [])
+ expect_pf = {
"dev_id": "pci_0000_03_00_0",
"address": "0000:03:00.0",
"product_id": '1013',
@@ -1228,13 +1286,15 @@ Active: 8381604 kB
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
name = "pci_0000_03_00_1"
- actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
- expect_vf = {
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], [])
+ expect_pf = {
"dev_id": "pci_0000_03_00_1",
"address": "0000:03:00.1",
"product_id": '1013',
@@ -1242,7 +1302,97 @@ Active: 8381604 kB
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
+ self.assertEqual(expect_pf, actual_pf)
+
+ # Parent PF with a VPD cap.
+ name = "pci_0000_82_00_0"
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_pf = {
+ "dev_id": "pci_0000_82_00_0",
+ "address": "0000:82:00.0",
+ "product_id": "a2d6",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_a2d6",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ "capabilities": {
+ # Should be obtained from the parent PF in this case.
+ "vpd": {"card_serial_number": "MT2113X00000"}},
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
+ }
+ self.assertEqual(expect_pf, actual_pf)
+
+ # A VF without a VPD cap with a parent PF that has a VPD cap.
+ name = "pci_0000_82_00_3"
+ actual_vf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_vf = {
+ "dev_id": "pci_0000_82_00_3",
+ "address": "0000:82:00.3",
+ "parent_addr": "0000:82:00.0",
+ "product_id": "101e",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_101e",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_VF,
+ "parent_ifname": "ens1",
+ "capabilities": {
+ "network": ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan",
+ "txvlan", "rxhash"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1},
+ # Should be obtained from the parent PF in this case.
+ "vpd": {"card_serial_number": "MT2113X00000"}},
+ }
+ self.assertEqual(expect_vf, actual_vf)
+
+ # A VF with a VPD cap without a test parent dev (used to check the
+ # VPD code path when a VF's own VPD capability is used).
+ name = "pci_0001_82_00_3"
+ actual_vf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_vf = {
+ "dev_id": "pci_0001_82_00_3",
+ "address": "0001:82:00.3",
+ "parent_addr": "0001:82:00.0",
+ "product_id": "101e",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_101e",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_VF,
+ "capabilities": {
+ # Should be obtained from the parent PF in this case.
+ "vpd": {"card_serial_number": "MT2113XBEEF0"}},
+ }
+ self.assertEqual(expect_vf, actual_vf)
+
+ # A VF without a VPD cap and without a test parent dev
+ # (used to check the code path where a VF VPD capability is
+ # checked but is not present and a parent PF info is not available).
+ name = "pci_0002_82_00_3"
+ actual_vf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_vf = {
+ "dev_id": "pci_0002_82_00_3",
+ "address": "0002:82:00.3",
+ "parent_addr": "0002:82:00.0",
+ "product_id": "101e",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_101e",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_VF,
+ 'parent_ifname': 'ens1',
+ "capabilities": {
+ "network": ["rx", "tx", "sg", "tso", "gso", "gro",
+ "rxvlan", "txvlan", "rxhash"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1}},
+ }
+
self.assertEqual(expect_vf, actual_vf)
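
The three VPD cases above reduce to a simple precedence rule: the
device's own VPD capability wins, and the parent PF's is the fallback.
A hedged sketch over capability dicts shaped like the expected values
(the helper name is hypothetical):

    def card_serial_number(dev_caps, parent_caps=None):
        for caps in (dev_caps, parent_caps or {}):
            vpd = caps.get('vpd') or {}
            if 'card_serial_number' in vpd:
                return vpd['card_serial_number']
        return None
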
def test_list_pci_devices(self):
@@ -1469,25 +1619,59 @@ Active: 8381604 kB
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
- def test_is_cpu_control_policy_capable_ok(self):
+ def test_is_cpu_control_policy_capable_via_neither(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=0))
+ self.assertFalse(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv1(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=1))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv2(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=2))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_has_cgroupsv1_cpu_controller_ok(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertTrue(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv1_cpu_controller_ko(self):
+ m = mock.mock_open(
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv1_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ok(self):
+ m = mock.mock_open(
+ read_data="cpuset cpu io memory hugetlb pids rdma misc"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv2_cpu_controller())
- def test_is_cpu_control_policy_capable_ko(self):
+ def test_has_cgroupsv2_cpu_controller_ko(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ read_data="memory pids"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
- @mock.patch('builtins.open', side_effect=IOError)
- def test_is_cpu_control_policy_capable_ioerror(self, mock_open):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv2_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
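
Taken together, these tests split the old capability check into one
probe per cgroups version, OR-ed at the top. A sketch with the paths
and parsing inferred from the mocked file contents (both are
assumptions):

    def has_cgroupsv1_cpu_controller():
        try:
            with open('/proc/mounts') as fh:
                for line in fh:
                    fields = line.split()
                    # Field 4 carries mount options; the v1 cpu
                    # controller appears as a standalone 'cpu' option.
                    if len(fields) > 3 and 'cpu' in fields[3].split(','):
                        return True
        except IOError:
            pass
        return False

    def has_cgroupsv2_cpu_controller():
        try:
            with open('/sys/fs/cgroup/cgroup.controllers') as fh:
                return 'cpu' in fh.read().split()
        except IOError:
            return False

    def is_cpu_control_policy_capable():
        return (has_cgroupsv1_cpu_controller() or
                has_cgroupsv2_cpu_controller())
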
def test_get_canonical_machine_type(self):
# this test relies on configuration from the FakeLibvirtFixture
@@ -1737,6 +1921,16 @@ cg /cgroup/memory cg opt1,opt2 0 0
"""
self.assertTrue(self.host.supports_secure_boot)
+ @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
+ def test_supports_remote_managed_ports__true(self, mock_libversion):
+ mock_libversion.return_value = 7009000
+ self.assertTrue(self.host.supports_remote_managed_ports)
+
+ @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
+ def test_supports_remote_managed_ports__false(self, mock_libversion):
+ mock_libversion.return_value = 7008000
+ self.assertFalse(self.host.supports_remote_managed_ports)
+
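
The two version values bracket a minimum of libvirt 7.9.0. A sketch of
the property with the getLibVersion() decoding spelled out (the
threshold is inferred from the test data):

    def supports_remote_managed_ports(conn):
        # libvirt packs versions as major * 1000000 + minor * 1000 +
        # micro, so 7009000 decodes to 7.9.0 and 7008000 to 7.8.0.
        ver = conn.getLibVersion()
        decoded = (ver // 1000000, (ver // 1000) % 1000, ver % 1000)
        return decoded >= (7, 9, 0)
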
@mock.patch.object(host.Host, 'loaders', new_callable=mock.PropertyMock)
@mock.patch.object(host.Host, 'get_canonical_machine_type')
def test_get_loader(self, mock_get_mtype, mock_loaders):
@@ -1811,6 +2005,14 @@ cg /cgroup/memory cg opt1,opt2 0 0
loader = self.host.get_loader('x86_64', 'q35', has_secure_boot=True)
self.assertIsNotNone(loader)
+ # check that SMM bool is false as we don't need it
+ self.assertFalse(loader[2])
+
+ # check that we get SMM bool correctly (True) when required
+ loaders[0]['features'].append('requires-smm')
+ loader = self.host.get_loader('x86_64', 'q35', has_secure_boot=True)
+ self.assertTrue(loader[2])
+
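
These assertions imply get_loader() now returns a third element
flagging SMM. A hedged sketch of deriving that flag from a firmware
descriptor record (the key names are hypothetical):

    def loader_to_tuple(record):
        # (loader path, nvram template, requires_smm): the last member
        # is True only when the descriptor lists 'requires-smm'.
        return (
            record['path'],
            record.get('nvram_template'),
            'requires-smm' in record.get('features', ()),
        )
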
# while it should fail here since we don't want it now
self.assertRaises(
exception.UEFINotSupported,
@@ -1831,6 +2033,7 @@ class TestLibvirtSEV(test.NoDBTestCase):
self.host = host.Host("qemu:///system")
+@ddt.ddt
class TestLibvirtSEVUnsupported(TestLibvirtSEV):
@mock.patch.object(os.path, 'exists', return_value=False)
def test_kernel_parameter_missing(self, fake_exists):
@@ -1838,19 +2041,26 @@ class TestLibvirtSEVUnsupported(TestLibvirtSEV):
fake_exists.assert_called_once_with(
'/sys/module/kvm_amd/parameters/sev')
+ @ddt.data(
+ ('0\n', False),
+ ('N\n', False),
+ ('1\n', True),
+ ('Y\n', True),
+ )
+ @ddt.unpack
@mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('builtins.open', mock.mock_open(read_data="0\n"))
- def test_kernel_parameter_zero(self, fake_exists):
- self.assertFalse(self.host._kernel_supports_amd_sev())
- fake_exists.assert_called_once_with(
- '/sys/module/kvm_amd/parameters/sev')
-
- @mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('builtins.open', mock.mock_open(read_data="1\n"))
- def test_kernel_parameter_one(self, fake_exists):
- self.assertTrue(self.host._kernel_supports_amd_sev())
- fake_exists.assert_called_once_with(
- '/sys/module/kvm_amd/parameters/sev')
+ def test_kernel_parameter(
+ self, sev_param_value, expected_support, mock_exists
+ ):
+ with mock.patch(
+ 'builtins.open', mock.mock_open(read_data=sev_param_value)
+ ):
+ self.assertIs(
+ expected_support,
+ self.host._kernel_supports_amd_sev()
+ )
+ mock_exists.assert_called_once_with(
+ '/sys/module/kvm_amd/parameters/sev')
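
The ddt matrix maps the sysfs parameter straight to a boolean. A
sketch consistent with all four cases (the path comes from the
assertion above; the accepted values are the test inputs):

    import os

    def kernel_supports_amd_sev():
        path = '/sys/module/kvm_amd/parameters/sev'
        if not os.path.exists(path):
            return False
        with open(path) as fh:
            # Newer kernels report Y/N where older ones reported 1/0.
            return fh.read().strip() in ('1', 'Y')
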
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch('builtins.open', mock.mock_open(read_data="1\n"))
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index decb27f982..0dc1009c92 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -19,11 +19,11 @@ import inspect
import os
import shutil
import tempfile
+from unittest import mock
from castellan import key_manager
import ddt
import fixtures
-import mock
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_service import loopingcall
@@ -163,7 +163,13 @@ class _ImageTestCase(object):
self.assertEqual(fs.source_file, image.path)
def test_libvirt_info(self):
- image = self.image_class(self.INSTANCE, self.NAME)
+ disk_info = {
+ 'bus': 'virtio',
+ 'dev': '/dev/vda',
+ 'type': 'cdrom',
+ }
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
extra_specs = {
'quota:disk_read_bytes_sec': 10 * units.Mi,
'quota:disk_read_iops_sec': 1 * units.Ki,
@@ -172,15 +178,9 @@ class _ImageTestCase(object):
'quota:disk_total_bytes_sec': 30 * units.Mi,
'quota:disk_total_iops_sec': 3 * units.Ki,
}
- disk_info = {
- 'bus': 'virtio',
- 'dev': '/dev/vda',
- 'type': 'cdrom',
- }
disk = image.libvirt_info(
- disk_info, cache_mode="none", extra_specs=extra_specs,
- boot_order="1")
+ cache_mode="none", extra_specs=extra_specs, boot_order="1")
self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
self.assertEqual("/dev/vda", disk.target_dev)
@@ -205,16 +205,18 @@ class _ImageTestCase(object):
get_disk_size.assert_called_once_with(image.path)
def _test_libvirt_info_scsi_with_unit(self, disk_unit):
- # The address should be set if bus is scsi and unit is set.
- # Otherwise, it should not be set at all.
- image = self.image_class(self.INSTANCE, self.NAME)
disk_info = {
'bus': 'scsi',
'dev': '/dev/sda',
'type': 'disk',
}
+ # The address should be set if bus is scsi and unit is set.
+ # Otherwise, it should not be set at all.
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
+
disk = image.libvirt_info(
- disk_info, cache_mode='none', extra_specs={}, disk_unit=disk_unit)
+ cache_mode='none', extra_specs={}, disk_unit=disk_unit)
if disk_unit:
self.assertEqual(0, disk.device_addr.controller)
self.assertEqual(disk_unit, disk.device_addr.unit)
@@ -523,7 +525,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
@@ -544,14 +546,14 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(
- self.TEMPLATE_PATH, self.PATH, self.SIZE)
+ self.PATH, 'qcow2', self.SIZE, backing_file=self.TEMPLATE_PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
@@ -576,7 +578,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@@ -615,7 +617,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
index f6e592231d..a005a6cf20 100644
--- a/nova/tests/unit/virt/libvirt/test_imagecache.py
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -18,8 +18,8 @@ import contextlib
import io
import os
import time
+from unittest import mock
-import mock
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import formatters
diff --git a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
index 42043ac495..08c54d02d3 100644
--- a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/virt/libvirt/test_migration.py b/nova/tests/unit/virt/libvirt/test_migration.py
index f4e64fbe53..155c259986 100644
--- a/nova/tests/unit/virt/libvirt/test_migration.py
+++ b/nova/tests/unit/virt/libvirt/test_migration.py
@@ -15,9 +15,9 @@
from collections import deque
import copy
import textwrap
+from unittest import mock
from lxml import etree
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
@@ -28,6 +28,7 @@ from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.unit.virt.libvirt import test_driver
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
@@ -80,16 +81,51 @@ class UtilityMigrationTestCase(test.NoDBTestCase):
get_volume_config = mock.MagicMock()
mock_guest.get_xml_desc.return_value = '<domain></domain>'
- migration.get_updated_guest_xml(
- mock.sentinel.instance, mock_guest, data, get_volume_config)
+ instance = objects.Instance(**test_driver._create_test_instance())
+ migration.get_updated_guest_xml(instance, mock_guest, data,
+ get_volume_config)
mock_graphics.assert_called_once_with(mock.ANY, data)
mock_serial.assert_called_once_with(mock.ANY, data)
mock_volume.assert_called_once_with(
- mock.ANY, data, mock.sentinel.instance, get_volume_config)
+ mock.ANY, data, instance, get_volume_config)
mock_perf_events_xml.assert_called_once_with(mock.ANY, data)
mock_memory_backing.assert_called_once_with(mock.ANY, data)
self.assertEqual(1, mock_tostring.called)
+ def test_update_quota_xml(self):
+ old_xml = """<domain>
+ <name>fake-instance</name>
+ <cputune>
+ <shares>42</shares>
+ <period>1337</period>
+ </cputune>
+ </domain>"""
+ instance = objects.Instance(**test_driver._create_test_instance())
+ new_xml = migration._update_quota_xml(instance,
+ etree.fromstring(old_xml))
+ new_xml = etree.tostring(new_xml, encoding='unicode')
+ self.assertXmlEqual(
+ """<domain>
+ <name>fake-instance</name>
+ <cputune>
+ <period>1337</period>
+ </cputune>
+ </domain>""", new_xml)
+
+ def test_update_quota_xml_empty_cputune(self):
+ old_xml = """<domain>
+ <name>fake-instance</name>
+ <cputune>
+ <shares>42</shares>
+ </cputune>
+ </domain>"""
+ instance = objects.Instance(**test_driver._create_test_instance())
+ new_xml = migration._update_quota_xml(instance,
+ etree.fromstring(old_xml))
+ new_xml = etree.tostring(new_xml, encoding='unicode')
+ self.assertXmlEqual('<domain><name>fake-instance</name></domain>',
+ new_xml)
+
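
Both quota tests follow from one rule: drop <shares> when rewriting the
XML for migration, and drop <cputune> too once it becomes empty. A
sketch with lxml (the function mirrors the tested private helper; its
body is an assumption):

    from lxml import etree

    def update_quota_xml(xml_doc):
        cputune = xml_doc.find('./cputune')
        if cputune is not None:
            shares = cputune.find('shares')
            if shares is not None:
                cputune.remove(shares)
            # An empty <cputune> element is dropped entirely.
            if len(cputune) == 0:
                xml_doc.remove(cputune)
        return xml_doc

    # e.g. etree.tostring(update_quota_xml(etree.fromstring(old_xml)))
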
def test_update_device_resources_xml_vpmem(self):
# original xml for vpmems, /dev/dax0.1 and /dev/dax0.2 here
# are vpmem device path on source host
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 4e73c662c5..c648108f56 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -18,9 +18,9 @@ import grp
import os
import pwd
import tempfile
+from unittest import mock
import ddt
-import mock
import os_traits
from oslo_config import cfg
from oslo_utils import fileutils
@@ -103,33 +103,98 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_create_image(self, mock_execute):
- libvirt_utils.create_image('raw', '/some/path', '10G')
- libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
- expected_args = [(('qemu-img', 'create', '-f', 'raw',
- '/some/path', '10G'),),
- (('qemu-img', 'create', '-f', 'qcow2',
- '/some/stuff', '1234567891234'),)]
- self.assertEqual(expected_args, mock_execute.call_args_list)
-
- @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
- def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
- mock_execute.return_value = ('stdout', None)
+ def _test_create_image(
+ self, path, disk_format, disk_size, mock_info, mock_execute,
+ mock_ntf, backing_file=None, encryption=None
+ ):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
- cluster_size=mock.sentinel.cluster_size)
- libvirt_utils.create_cow_image(mock.sentinel.backing_path,
- mock.sentinel.new_path)
- mock_info.assert_called_once_with(mock.sentinel.backing_path)
- mock_execute.assert_has_calls([mock.call(
- 'qemu-img', 'create', '-f', 'qcow2', '-o',
- 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
- mock.sentinel.backing_path, mock.sentinel.backing_fmt,
- mock.sentinel.cluster_size),
- mock.sentinel.new_path)])
+ cluster_size=mock.sentinel.cluster_size,
+ )
+ fh = mock_ntf.return_value.__enter__.return_value
+
+ libvirt_utils.create_image(
+ path, disk_format, disk_size, backing_file=backing_file,
+ encryption=encryption,
+ )
+
+ cow_opts = []
+
+ if backing_file is None:
+ mock_info.assert_not_called()
+ else:
+ mock_info.assert_called_once_with(backing_file)
+ cow_opts = [
+ '-o',
+ f'backing_file={mock.sentinel.backing_file},'
+ f'backing_fmt={mock.sentinel.backing_fmt},'
+ f'cluster_size={mock.sentinel.cluster_size}',
+ ]
+
+ encryption_opts = []
+
+ if encryption:
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={fh.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ expected_args = (
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
+ disk_format, *cow_opts, *encryption_opts, path,
+ )
+ if disk_size is not None:
+ expected_args += (disk_size,)
+
+ self.assertEqual([(expected_args,)], mock_execute.call_args_list)
+
+ def test_create_image_raw(self):
+ self._test_create_image('/some/path', 'raw', '10G')
+
+ def test_create_image_qcow2(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ )
+
+ def test_create_image_backing_file(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_size_none(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', None,
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_encryption(self):
+ encryption = {
+ 'secret': 'a_secret',
+ 'format': 'luks',
+ }
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ encryption=encryption,
+ )
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
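# Editor's note: consolidating the old create_image/create_cow_image
# tests into one parameterised helper makes the asserted qemu-img
# invocation explicit. A hedged sketch of the command line the tests
# above expect create_image() to build; the secret_file path and the
# backing_fmt/cluster_size values are placeholders here (the real
# helper gets them from a NamedTemporaryFile and from
# nova.virt.images.qemu_img_info respectively):

def build_qemu_img_create(path, disk_format, disk_size=None,
                          backing_file=None, encryption=None,
                          secret_file='/tmp/keyfile'):
    cmd = ['env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create',
           '-f', disk_format]
    if backing_file is not None:
        # backing_fmt and cluster_size come from probing backing_file
        cmd += ['-o', 'backing_file=%s,backing_fmt=qcow2,'
                      'cluster_size=65536' % backing_file]
    if encryption:
        # the LUKS secret travels via a temporary file, never argv
        cmd += ['--object', 'secret,id=sec,file=%s' % secret_file,
                '-o', 'encrypt.key-secret=sec',
                '-o', 'encrypt.format=%s' % encryption.get('format')]
        # plus the fixed cipher/ivgen/hash '-o' pairs asserted above
    cmd.append(path)
    if disk_size is not None:
        cmd.append(disk_size)
    return cmd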
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 43504efeb5..6d87ed727c 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -12,9 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
from lxml import etree
-import mock
import os_vif
from os_vif import exception as osv_exception
from os_vif import objects as osv_objects
@@ -517,18 +518,17 @@ class LibvirtVifTestCase(test.NoDBTestCase):
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
- self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False))
+ self.libvirt = self.useFixture(
+ nova_fixtures.LibvirtFixture(stub_os_vif=False))
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
# multiqueue configuration is host OS specific
- _a = mock.patch('os.uname')
- self.mock_uname = _a.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.10.13-200-generic', '', 'x86_64')
- self.addCleanup(_a.stop)
def _get_node(self, xml):
doc = etree.fromstring(xml)
@@ -899,7 +899,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config',
wraps=vif.designer.set_vif_guest_frontend_config)
- def _test_model_sriov(self, vinc_type, mock_set):
+ def _test_model_sriov(self, vnic_type, mock_set):
"""Direct attach vNICs shouldn't retrieve info from image_meta."""
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
@@ -911,7 +911,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio'}})
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
- None, 'kvm', vinc_type)
+ None, 'kvm', vnic_type)
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
None, None, None, None)
self.assertIsNone(conf.vhost_queues)
@@ -983,14 +983,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.vif_bridge,
self.vif_bridge['network']['bridge'])
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- @mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
- @mock.patch('nova.privsep.linux_net.set_device_macaddr')
- @mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan')
- def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan,
- mock_set_macaddr, mock_get_vf_num,
- mock_get_ifname):
- mock_get_ifname.side_effect = ['eth1', 'eth13']
+ def _test_hw_veb_op(self, op, vlan):
+ self.libvirt.mock_get_vf_num_by_pci_address.return_value = 1
+ pci_utils.get_ifname_by_pci_address.side_effect = ['eth1', 'eth13']
vlan_id = int(vlan)
port_state = 'up' if vlan_id > 0 else 'down'
mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug'
@@ -1005,10 +1000,13 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'set_macaddr': [mock.call('eth13', mac, port_state=port_state)]
}
op(self.instance, self.vif_hw_veb_macvtap)
- mock_get_ifname.assert_has_calls(calls['get_ifname'])
- mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
- mock_set_macaddr.assert_has_calls(calls['set_macaddr'])
- mock_set_macaddr_and_vlan.assert_called_once_with(
+ pci_utils.get_ifname_by_pci_address.assert_has_calls(
+ calls['get_ifname'])
+ self.libvirt.mock_get_vf_num_by_pci_address.assert_has_calls(
+ calls['get_vf_num'])
+ self.libvirt.mock_set_device_macaddr.assert_has_calls(
+ calls['set_macaddr'])
+ self.libvirt.mock_set_device_macaddr_and_vlan.assert_called_once_with(
'eth1', 1, mock.ANY, vlan_id)
def test_plug_hw_veb(self):
@@ -1218,9 +1216,8 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='eth1')
- def test_hw_veb_driver_macvtap(self, mock_get_ifname):
+ def test_hw_veb_driver_macvtap(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'eth1'
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
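# Editor's note: the edits in this file all lean on an enriched
# LibvirtFixture -- the fixture now starts the os.uname, pci_utils and
# privsep patches itself and exposes the resulting mocks as
# attributes, so individual tests can drop their @mock.patch
# decorators. Roughly this shape (a sketch only; the real fixture in
# nova/tests/fixtures/libvirt.py patches far more, and the exact patch
# targets below are assumptions):

import fixtures

class LibvirtFixtureSketch(fixtures.Fixture):
    def _setUp(self):
        self.mock_uname = self.useFixture(
            fixtures.MockPatch('os.uname')).mock
        self.mock_get_vf_num_by_pci_address = self.useFixture(
            fixtures.MockPatch(
                'nova.pci.utils.get_vf_num_by_pci_address')).mock
        self.mock_set_device_macaddr = self.useFixture(
            fixtures.MockPatch(
                'nova.privsep.linux_net.set_device_macaddr')).mock
        self.mock_set_device_macaddr_and_vlan = self.useFixture(
            fixtures.MockPatch(
                'nova.privsep.linux_net.set_device_macaddr_and_vlan')).mock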
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 89a59f2f1a..06065322f6 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -11,8 +11,8 @@
# under the License.
import platform
+from unittest import mock
-import mock
from os_brick.initiator import connector
from nova.objects import fields as obj_fields
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fs.py b/nova/tests/unit/virt/libvirt/volume/test_fs.py
index eaa6568999..5619dff589 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fs.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova import test
from nova import utils
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index f8a64abea5..bd516b1dd6 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import iscsi
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
new file mode 100644
index 0000000000..8a85d73059
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova.tests.unit.virt.libvirt.volume import test_volume
+from nova.virt.libvirt.volume import lightos
+
+from os_brick import initiator
+
+
+class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
+
+ @mock.patch('nova.utils.get_root_helper')
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
+ def test_libvirt_lightos_driver(self, mock_factory, mock_helper):
+ mock_helper.return_value = 'sudo'
+ lightos.LibvirtLightOSVolumeDriver(self.fake_host)
+ mock_factory.assert_called_once_with(
+ initiator.LIGHTOS, root_helper='sudo',
+ device_scan_attempts=5)
+
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
+ new=mock.Mock())
+ def test_libvirt_lightos_driver_connect(self):
+ lightos_driver = lightos.LibvirtLightOSVolumeDriver(
+ self.fake_host)
+ config = {'server_ip': '127.0.0.1', 'server_port': 9898}
+ disk_info = {
+ 'id': '1234567',
+ 'name': 'aLightVolume',
+ 'conf': config}
+ connection_info = {'data': disk_info}
+ lightos_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ lightos_driver.connect_volume(connection_info, None)
+
+ lightos_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567',
+ connection_info['data']['device_path'])
+
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
+ new=mock.Mock(return_value=mock.Mock()))
+ def test_libvirt_lightos_driver_disconnect(self):
+ lightos_driver = lightos.LibvirtLightOSVolumeDriver(self.connr)
+ disk_info = {
+ 'path': '/dev/dms1234567', 'name': 'aLightosVolume',
+ 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0',
+ 'device_path': '/dev/dms123456'}
+ connection_info = {'data': disk_info}
+ lightos_driver.disconnect_volume(connection_info, None)
+ lightos_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None)
+
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
+ new=mock.Mock(return_value=mock.Mock()))
+ def test_libvirt_lightos_driver_get_config(self):
+ lightos_driver = lightos.LibvirtLightOSVolumeDriver(self.fake_host)
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ conf = lightos_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(device_path, tree.find('./source').get('dev'))
+ self.assertEqual('raw', tree.find('./driver').get('type'))
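# Editor's note: the get_config assertions above translate to a <disk>
# element of roughly this shape (illustrative XML reconstructed from
# the three assertions; the device and target attributes are
# assumptions, not asserted by the test):
#
#   <disk type="block" device="disk">
#     <driver name="qemu" type="raw"/>
#     <source dev="/dev/fake-dev"/>
#     <target bus="virtio" dev="vda"/>
#   </disk>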
diff --git a/nova/tests/unit/virt/libvirt/volume/test_mount.py b/nova/tests/unit/virt/libvirt/volume/test_mount.py
index b618e090ba..8ecb117f05 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_mount.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_mount.py
@@ -15,10 +15,10 @@
import os.path
import threading
import time
+from unittest import mock
import eventlet
import fixtures
-import mock
from oslo_concurrency import processutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/virt/libvirt/volume/test_net.py b/nova/tests/unit/virt/libvirt/volume/test_net.py
index a694351629..8d8167b3d7 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_net.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_net.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.conf
from nova.tests.unit.virt.libvirt.volume import test_volume
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nfs.py b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
index 16c41f5387..a98efaac1c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.tests.unit.virt.libvirt.volume import test_mount
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index 0d1f23d7a2..3f593841fa 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import nvme
@@ -29,7 +29,21 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
nvme.LibvirtNVMEVolumeDriver(self.fake_host)
mock_factory.assert_called_once_with(
- initiator.NVME, 'sudo',
+ initiator.NVME, 'sudo', use_multipath=False,
+ device_scan_attempts=3)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.get_root_helper')
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
+ def test_libvirt_nvme_driver_multipath(self, mock_factory, mock_helper,
+ exists):
+ self.flags(num_nvme_discover_tries=3, volume_use_multipath=True,
+ group='libvirt')
+ mock_helper.return_value = 'sudo'
+
+ nvme.LibvirtNVMEVolumeDriver(self.fake_host)
+ mock_factory.assert_called_once_with(
+ initiator.NVME, 'sudo', use_multipath=True,
device_scan_attempts=3)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
@@ -42,14 +56,15 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
'name': 'aNVMEVolume',
'conf': config}
connection_info = {'data': disk_info}
- with mock.patch.object(nvme_driver.connector,
- 'connect_volume',
- return_value={'path': '/dev/dms1234567'}):
- nvme_driver.connect_volume(connection_info, None)
- nvme_driver.connector.connect_volume.assert_called_once_with(
- connection_info['data'])
- self.assertEqual('/dev/dms1234567',
- connection_info['data']['device_path'])
+ nvme_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ nvme_driver.connect_volume(connection_info, None)
+
+ nvme_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567', connection_info['data']['device_path'])
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
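# Editor's note: the pair of factory tests above implies driver
# initialisation along these lines -- the [libvirt] multipath and
# discovery-retry options are read from config and forwarded to
# os-brick's connector factory. A hedged sketch, not the literal body
# of nova/virt/libvirt/volume/nvme.py:

from os_brick import initiator
from os_brick.initiator import connector

import nova.conf
from nova import utils

CONF = nova.conf.CONF

def build_nvme_connector():
    return connector.InitiatorConnector.factory(
        initiator.NVME, utils.get_root_helper(),
        use_multipath=CONF.libvirt.volume_use_multipath,
        device_scan_attempts=CONF.libvirt.num_nvme_discover_tries)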
diff --git a/nova/tests/unit/virt/libvirt/volume/test_quobyte.py b/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
index 8a0c647fc8..bb3c86083c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
@@ -16,9 +16,9 @@
import os
import traceback
+from unittest import mock
import ddt
-import mock
from oslo_concurrency import processutils
from oslo_utils import fileutils
import psutil
diff --git a/nova/tests/unit/virt/libvirt/volume/test_remotefs.py b/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
index 62060bcf1e..67c126c2b1 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index 6d9247cd2d..f0fcba1deb 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import scaleio
diff --git a/nova/tests/unit/virt/libvirt/volume/test_smbfs.py b/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
index 2c3ea574a9..0fba137740 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index e14954f148..678d4f8eb4 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -13,8 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from os_brick import initiator
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import storpool as vol_sp
diff --git a/nova/tests/unit/virt/libvirt/volume/test_volume.py b/nova/tests/unit/virt/libvirt/volume/test_volume.py
index ac7bcf247d..9a3710a51d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_volume.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_volume.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 883cebb55a..168efee944 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from os_brick.initiator import connector
from nova import exception
diff --git a/nova/tests/unit/virt/powervm/__init__.py b/nova/tests/unit/virt/powervm/__init__.py
deleted file mode 100644
index dedb6af7db..0000000000
--- a/nova/tests/unit/virt/powervm/__init__.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-from oslo_utils.fixture import uuidsentinel
-
-from nova.compute import power_state
-from nova.compute import vm_states
-from nova import objects
-
-
-TEST_FLAVOR = objects.flavor.Flavor(
- memory_mb=2048,
- swap=0,
- vcpu_weight=None,
- root_gb=10, id=2,
- name=u'm1.small',
- ephemeral_gb=0,
- rxtx_factor=1.0,
- flavorid=uuidsentinel.flav_id,
- vcpus=1)
-
-TEST_INSTANCE = objects.Instance(
- id=1,
- uuid=uuidsentinel.inst_id,
- display_name='Fake Instance',
- root_gb=10,
- ephemeral_gb=0,
- instance_type_id=TEST_FLAVOR.id,
- system_metadata={'image_os_distro': 'rhel'},
- host='host1',
- flavor=TEST_FLAVOR,
- task_state=None,
- vm_state=vm_states.STOPPED,
- power_state=power_state.SHUTDOWN,
-)
-
-IMAGE1 = {
- 'id': uuidsentinel.img_id,
- 'name': 'image1',
- 'size': 300,
- 'container_format': 'bare',
- 'disk_format': 'raw',
- 'checksum': 'b518a8ba2b152b5607aceb5703fac072',
-}
-TEST_IMAGE1 = objects.image_meta.ImageMeta.from_dict(IMAGE1)
diff --git a/nova/tests/unit/virt/powervm/disk/fake_adapter.py b/nova/tests/unit/virt/powervm/disk/fake_adapter.py
deleted file mode 100644
index c0b4962e54..0000000000
--- a/nova/tests/unit/virt/powervm/disk/fake_adapter.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.virt.powervm.disk import driver as disk_dvr
-
-
-class FakeDiskAdapter(disk_dvr.DiskAdapter):
- """A fake subclass of DiskAdapter.
-
- This is done so that the abstract methods/properties can be stubbed and the
- class can be instantiated for testing.
- """
-
- def _vios_uuids(self):
- pass
-
- def _disk_match_func(self, disk_type, instance):
- pass
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- pass
-
- def capacity(self):
- pass
-
- def capacity_used(self):
- pass
-
- def detach_disk(self, instance):
- pass
-
- def delete_disks(self, storage_elems):
- pass
-
- def create_disk_from_image(self, context, instance, image_meta):
- pass
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- pass
diff --git a/nova/tests/unit/virt/powervm/disk/test_driver.py b/nova/tests/unit/virt/powervm/disk/test_driver.py
deleted file mode 100644
index c27825801f..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_driver.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import const as pvm_const
-
-from nova import test
-from nova.tests.unit.virt.powervm.disk import fake_adapter
-
-
-class TestDiskAdapter(test.NoDBTestCase):
- """Unit Tests for the generic storage driver."""
-
- def setUp(self):
- super(TestDiskAdapter, self).setUp()
-
- # Return the mgmt uuid
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid')).mock
- self.mgmt_uuid.return_value = 'mp_uuid'
-
- # The values (adapter and host uuid) are not used in the base.
- # Default them to None. We use the fake adapter here because we can't
- # instantiate DiskAdapter which is an abstract base class.
- self.st_adpt = fake_adapter.FakeDiskAdapter(None, None)
-
- @mock.patch("pypowervm.util.sanitize_file_name_for_api")
- def test_get_disk_name(self, mock_san):
- inst = mock.Mock()
- inst.configure_mock(name='a_name_that_is_longer_than_eight',
- uuid='01234567-abcd-abcd-abcd-123412341234')
-
- # Long
- self.assertEqual(mock_san.return_value,
- self.st_adpt._get_disk_name('type', inst))
- mock_san.assert_called_with(inst.name, prefix='type_',
- max_len=pvm_const.MaxLen.FILENAME_DEFAULT)
-
- mock_san.reset_mock()
-
- # Short
- self.assertEqual(mock_san.return_value,
- self.st_adpt._get_disk_name('type', inst, short=True))
- mock_san.assert_called_with('a_name_t_0123', prefix='t_',
- max_len=pvm_const.MaxLen.VDISK_NAME)
diff --git a/nova/tests/unit/virt/powervm/disk/test_localdisk.py b/nova/tests/unit/virt/powervm/disk/test_localdisk.py
deleted file mode 100644
index 25b8395bb2..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_localdisk.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-
-from nova import exception
-from nova import test
-from oslo_utils.fixture import uuidsentinel as uuids
-from pypowervm import const as pvm_const
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova.virt.powervm.disk import driver as disk_dvr
-from nova.virt.powervm.disk import localdisk
-
-
-class TestLocalDisk(test.NoDBTestCase):
- """Unit Tests for the LocalDisk storage driver."""
-
- def setUp(self):
- super(TestLocalDisk, self).setUp()
- self.adpt = mock.Mock()
-
- # The mock VIOS needs to have scsi_mappings as a list. Internals are
- # set by individual test cases as needed.
- smaps = [mock.Mock()]
- self.vio_wrap = mock.create_autospec(
- pvm_vios.VIOS, instance=True, scsi_mappings=smaps,
- uuid='vios-uuid')
-
- # Return the mgmt uuid.
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid', autospec=True)).mock
- self.mgmt_uuid.return_value = 'mgmt_uuid'
-
- self.pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
- self.pvm_uuid.return_value = 'pvm_uuid'
-
- # Set up for the mocks for the disk adapter.
- self.mock_find_vg = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.storage.find_vg', autospec=True)).mock
- self.vg_uuid = uuids.vg_uuid
- self.vg = mock.Mock(spec=pvm_stg.VG, uuid=self.vg_uuid)
- self.mock_find_vg.return_value = (self.vio_wrap, self.vg)
-
- self.flags(volume_group_name='fakevg', group='powervm')
-
- # Mock the feed tasks.
- self.mock_afs = self.useFixture(fixtures.MockPatch(
- 'pypowervm.utils.transaction.FeedTask.add_functor_subtask',
- autospec=True)).mock
- self.mock_wtsk = mock.create_autospec(
- pvm_tx.WrapperTask, instance=True)
- self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
- self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.mock_ftsk.configure_mock(
- wrapper_tasks={'vios-uuid': self.mock_wtsk})
-
- # Create the adapter.
- self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
-
- def test_init(self):
- # Localdisk adapter already initialized in setUp()
- # From super __init__()
- self.assertEqual(self.adpt, self.ld_adpt._adapter)
- self.assertEqual('host_uuid', self.ld_adpt._host_uuid)
- self.assertEqual('mgmt_uuid', self.ld_adpt.mp_uuid)
-
- # From LocalStorage __init__()
- self.assertEqual('fakevg', self.ld_adpt.vg_name)
- self.mock_find_vg.assert_called_once_with(self.adpt, 'fakevg')
- self.assertEqual('vios-uuid', self.ld_adpt._vios_uuid)
- self.assertEqual(self.vg_uuid, self.ld_adpt.vg_uuid)
- self.assertFalse(self.ld_adpt.capabilities['shared_storage'])
- self.assertFalse(self.ld_adpt.capabilities['has_imagecache'])
- self.assertFalse(self.ld_adpt.capabilities['snapshot'])
-
- # Assert snapshot capability is true if hosting I/O on mgmt partition.
- self.mgmt_uuid.return_value = 'vios-uuid'
- self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
- self.assertTrue(self.ld_adpt.capabilities['snapshot'])
-
- # Assert volume_group_name is required.
- self.flags(volume_group_name=None, group='powervm')
- self.assertRaises(exception.OptRequiredIfOtherOptValue,
- localdisk.LocalStorage, self.adpt, 'host_uuid')
-
- def test_vios_uuids(self):
- self.assertEqual(['vios-uuid'], self.ld_adpt._vios_uuids)
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
- def test_disk_match_func(self, mock_disk_name, mock_gen_match):
- mock_disk_name.return_value = 'disk_name'
- func = self.ld_adpt._disk_match_func('disk_type', 'instance')
- mock_disk_name.assert_called_once_with(
- 'disk_type', 'instance', short=True)
- mock_gen_match.assert_called_once_with(
- pvm_stg.VDisk, names=['disk_name'])
- self.assertEqual(mock_gen_match.return_value, func)
-
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
- def test_capacity(self, mock_vg):
- """Tests the capacity methods."""
- mock_vg.return_value = mock.Mock(
- capacity='5120', available_size='2048')
- self.assertEqual(5120.0, self.ld_adpt.capacity)
- self.assertEqual(3072.0, self.ld_adpt.capacity_used)
-
- @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
- def test_delete_disks(self, mock_vg, mock_rm_vg):
- self.ld_adpt.delete_disks('storage_elems')
- mock_vg.assert_called_once_with()
- mock_rm_vg.assert_called_once_with(
- mock_vg.return_value, vdisks='storage_elems')
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- def test_detach_disk(self, mock_match_fn, mock_rm_maps, mock_vios):
- mock_match_fn.return_value = 'match_func'
- mock_vios.return_value = self.vio_wrap
- mock_map1 = mock.Mock(backing_storage='back_stor1')
- mock_map2 = mock.Mock(backing_storage='back_stor2')
- mock_rm_maps.return_value = [mock_map1, mock_map2]
-
- back_stores = self.ld_adpt.detach_disk('instance')
-
- self.assertEqual(['back_stor1', 'back_stor2'], back_stores)
- mock_match_fn.assert_called_once_with(pvm_stg.VDisk)
- mock_vios.assert_called_once_with(
- self.ld_adpt._adapter, uuid='vios-uuid',
- xag=[pvm_const.XAG.VIO_SMAP])
- mock_rm_maps.assert_called_with(self.vio_wrap, 'pvm_uuid',
- match_func=mock_match_fn.return_value)
- mock_vios.return_value.update.assert_called_once()
-
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_vdisk_mapping',
- autospec=True)
- def test_disconnect_disk_from_mgmt(self, mock_rm_vdisk_map):
- self.ld_adpt.disconnect_disk_from_mgmt('vios-uuid', 'disk_name')
- mock_rm_vdisk_map.assert_called_with(
- self.ld_adpt._adapter, 'vios-uuid', 'mgmt_uuid',
- disk_names=['disk_name'])
-
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._upload_image')
- def test_create_disk_from_image(self, mock_upload_image):
- mock_image_meta = mock.Mock()
- mock_image_meta.size = 30
- mock_upload_image.return_value = 'mock_img'
-
- self.ld_adpt.create_disk_from_image(
- 'context', 'instance', mock_image_meta)
-
- mock_upload_image.assert_called_once_with(
- 'context', 'instance', mock_image_meta)
-
- @mock.patch('nova.image.glance.API.download')
- @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter')
- @mock.patch('pypowervm.tasks.storage.upload_new_vdisk')
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
- def test_upload_image(self, mock_name, mock_upload, mock_iter, mock_dl):
- mock_meta = mock.Mock(id='1', size=1073741824, disk_format='raw')
- mock_upload.return_value = ['mock_img']
-
- mock_img = self.ld_adpt._upload_image('context', 'inst', mock_meta)
-
- self.assertEqual('mock_img', mock_img)
- mock_name.assert_called_once_with(
- disk_dvr.DiskType.BOOT, 'inst', short=True)
- mock_dl.assert_called_once_with('context', '1')
- mock_iter.assert_called_once_with(mock_dl.return_value)
- mock_upload.assert_called_once_with(
- self.adpt, 'vios-uuid', self.vg_uuid, mock_iter.return_value,
- mock_name.return_value, 1073741824, d_size=1073741824,
- upload_type=tsk_stg.UploadType.IO_STREAM, file_format='raw')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- def test_attach_disk(self, mock_bldmap, mock_addmap):
- def test_afs(add_func):
- # Verify the internal add_func
- self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
- mock_bldmap.assert_called_once_with(
- self.ld_adpt._host_uuid, self.vio_wrap, 'pvm_uuid',
- 'disk_info')
- mock_addmap.assert_called_once_with(
- self.vio_wrap, mock_bldmap.return_value)
-
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
- self.ld_adpt.attach_disk('instance', 'disk_info', self.mock_ftsk)
- self.pvm_uuid.assert_called_once_with('instance')
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
-
- @mock.patch('pypowervm.wrappers.storage.VG.get')
- def test_get_vg_wrap(self, mock_vg):
- vg_wrap = self.ld_adpt._get_vg_wrap()
- self.assertEqual(mock_vg.return_value, vg_wrap)
- mock_vg.assert_called_once_with(
- self.adpt, uuid=self.vg_uuid, parent_type=pvm_vios.VIOS,
- parent_uuid='vios-uuid')
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage.'
- '_disk_match_func')
- def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
- mock_vios):
- mock_vios.return_value = self.vio_wrap
-
- # No maps found
- mock_findmaps.return_value = None
- devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
- self.pvm_uuid.assert_called_once_with('inst')
- mock_match_fn.assert_called_once_with(disk_dvr.DiskType.BOOT, 'inst')
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id='pvm_uuid',
- match_func=mock_match_fn.return_value)
- self.assertIsNone(devname)
-
- # Good map
- mock_lu = mock.Mock()
- mock_lu.server_adapter.backing_dev_name = 'devname'
- mock_findmaps.return_value = [mock_lu]
- devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
- self.assertEqual('devname', devname)
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps')
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.wrappers.storage.VG.get', new=mock.Mock())
- def test_get_bootdisk_iter(self, mock_vios, mock_find_maps, mock_lw):
- inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
-
- # Good path
- mock_vios.return_value = vios1
- for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
- self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
- self.assertEqual(vios1.uuid, vios.uuid)
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
-
- # Not found, no storage of that name.
- mock_vios.reset_mock()
- mock_find_maps.return_value = []
- for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
- self.fail('Should not have found any storage elements.')
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
-
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_bootdisk_iter',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
- def test_connect_instance_disk_to_mgmt(self, mock_add, mock_lw, mock_iter):
- inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
-
- # Good path
- mock_iter.return_value = [(vios1.scsi_mappings[0].backing_storage,
- vios1)]
- vdisk, vios = self.ld_adpt.connect_instance_disk_to_mgmt(inst)
- self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
- self.assertIs(vios1, vios)
- self.assertEqual(1, mock_add.call_count)
- mock_add.assert_called_with('host_uuid', vios, 'mgmt_uuid', vdisk)
-
- # add_vscsi_mapping raises. Show-stopper since only one VIOS.
- mock_add.reset_mock()
- mock_add.side_effect = Exception
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ld_adpt.connect_instance_disk_to_mgmt, inst)
- self.assertEqual(1, mock_add.call_count)
-
- # Not found
- mock_add.reset_mock()
- mock_iter.return_value = []
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ld_adpt.connect_instance_disk_to_mgmt, inst)
- self.assertFalse(mock_add.called)
-
- def _bld_mocks_for_instance_disk(self):
- inst = mock.Mock()
- inst.name = 'Name Of Instance'
- inst.uuid = uuids.inst_uuid
- lpar_wrap = mock.Mock()
- lpar_wrap.id = 2
- vios1 = self.vio_wrap
- back_stor_name = 'b_Name_Of__' + inst.uuid[:4]
- vios1.scsi_mappings[0].backing_storage.name = back_stor_name
- return inst, lpar_wrap, vios1
diff --git a/nova/tests/unit/virt/powervm/disk/test_ssp.py b/nova/tests/unit/virt/powervm/disk/test_ssp.py
deleted file mode 100644
index 86705dc29b..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_ssp.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils import uuidutils
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import cluster as pvm_clust
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm.disk import ssp as ssp_dvr
-from nova.virt.powervm import vm
-
-FAKE_INST_UUID = uuidutils.generate_uuid(dashed=True)
-FAKE_INST_UUID_PVM = vm.get_pvm_uuid(mock.Mock(uuid=FAKE_INST_UUID))
-
-
-class TestSSPDiskAdapter(test.NoDBTestCase):
- """Unit Tests for the LocalDisk storage driver."""
-
- def setUp(self):
- super(TestSSPDiskAdapter, self).setUp()
-
- self.inst = powervm.TEST_INSTANCE
-
- self.apt = mock.Mock()
- self.host_uuid = 'host_uuid'
-
- self.ssp_wrap = mock.create_autospec(pvm_stg.SSP, instance=True)
-
- # SSP.refresh() returns itself
- self.ssp_wrap.refresh.return_value = self.ssp_wrap
- self.node1 = mock.create_autospec(pvm_clust.Node, instance=True)
- self.node2 = mock.create_autospec(pvm_clust.Node, instance=True)
- self.clust_wrap = mock.create_autospec(
- pvm_clust.Cluster, instance=True)
- self.clust_wrap.nodes = [self.node1, self.node2]
- self.clust_wrap.refresh.return_value = self.clust_wrap
- self.tier_wrap = mock.create_autospec(pvm_stg.Tier, instance=True)
- # Tier.refresh() returns itself
- self.tier_wrap.refresh.return_value = self.tier_wrap
- self.vio_wrap = mock.create_autospec(pvm_vios.VIOS, instance=True)
-
- # For _cluster
- self.mock_clust = self.useFixture(fixtures.MockPatch(
- 'pypowervm.wrappers.cluster.Cluster', autospec=True)).mock
- self.mock_clust.get.return_value = [self.clust_wrap]
-
- # For _ssp
- self.mock_ssp_gbhref = self.useFixture(fixtures.MockPatch(
- 'pypowervm.wrappers.storage.SSP.get_by_href')).mock
- self.mock_ssp_gbhref.return_value = self.ssp_wrap
-
- # For _tier
- self.mock_get_tier = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.storage.default_tier_for_ssp',
- autospec=True)).mock
- self.mock_get_tier.return_value = self.tier_wrap
-
- # A FeedTask
- self.mock_wtsk = mock.create_autospec(
- pvm_tx.WrapperTask, instance=True)
- self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
- self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.mock_afs = self.mock_ftsk.add_functor_subtask
- self.mock_ftsk.configure_mock(
- wrapper_tasks={self.vio_wrap.uuid: self.mock_wtsk})
-
- self.pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
-
- # Return the mgmt uuid
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid')).mock
- self.mgmt_uuid.return_value = 'mp_uuid'
-
- # The SSP disk adapter
- self.ssp_drv = ssp_dvr.SSPDiskAdapter(self.apt, self.host_uuid)
-
- def test_init(self):
- self.assertEqual(self.apt, self.ssp_drv._adapter)
- self.assertEqual(self.host_uuid, self.ssp_drv._host_uuid)
- self.mock_clust.get.assert_called_once_with(self.apt)
- self.assertEqual(self.mock_clust.get.return_value,
- [self.ssp_drv._clust])
- self.mock_ssp_gbhref.assert_called_once_with(
- self.apt, self.clust_wrap.ssp_uri)
- self.assertEqual(self.mock_ssp_gbhref.return_value, self.ssp_drv._ssp)
- self.mock_get_tier.assert_called_once_with(self.ssp_wrap)
- self.assertEqual(self.mock_get_tier.return_value, self.ssp_drv._tier)
-
- def test_init_error(self):
- # Do these in reverse order to verify we trap all of 'em
- for raiser in (self.mock_get_tier, self.mock_ssp_gbhref,
- self.mock_clust.get):
- raiser.side_effect = pvm_exc.TimeoutError("timed out")
- self.assertRaises(exception.NotFound,
- ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
- raiser.side_effect = ValueError
- self.assertRaises(ValueError,
- ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
-
- def test_capabilities(self):
- self.assertTrue(self.ssp_drv.capabilities.get('shared_storage'))
- self.assertFalse(self.ssp_drv.capabilities.get('has_imagecache'))
- self.assertTrue(self.ssp_drv.capabilities.get('snapshot'))
-
- @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
- def test_vios_uuids(self, mock_rpu):
- mock_rpu.return_value = self.host_uuid
- vios_uuids = self.ssp_drv._vios_uuids
- self.assertEqual({self.node1.vios_uuid, self.node2.vios_uuid},
- set(vios_uuids))
- mock_rpu.assert_has_calls(
- [mock.call(node.vios_uri, preserve_case=True, root=True)
- for node in [self.node1, self.node2]])
-
- mock_rpu.reset_mock()
-
- # Test VIOSes on other nodes, which won't have uuid or url
- node1 = mock.Mock(vios_uuid=None, vios_uri='uri1')
- node2 = mock.Mock(vios_uuid='2', vios_uri=None)
- # This mock is good and should be returned
- node3 = mock.Mock(vios_uuid='3', vios_uri='uri3')
- self.clust_wrap.nodes = [node1, node2, node3]
- self.assertEqual(['3'], self.ssp_drv._vios_uuids)
- # get_req_path_uuid was only called on the good one
- mock_rpu.assert_called_once_with('uri3', preserve_case=True, root=True)
-
- def test_capacity(self):
- self.tier_wrap.capacity = 10
- self.assertAlmostEqual(10.0, self.ssp_drv.capacity)
- self.tier_wrap.refresh.assert_called_once_with()
-
- def test_capacity_used(self):
- self.ssp_wrap.capacity = 4.56
- self.ssp_wrap.free_space = 1.23
- self.assertAlmostEqual((4.56 - 1.23), self.ssp_drv.capacity_used)
- self.ssp_wrap.refresh.assert_called_once_with()
-
- @mock.patch('pypowervm.tasks.cluster_ssp.get_or_upload_image_lu',
- autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.util.sanitize_file_name_for_api', autospec=True)
- @mock.patch('pypowervm.tasks.storage.crt_lu', autospec=True)
- @mock.patch('nova.image.glance.API.download')
- @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter',
- autospec=True)
- def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu,
- mock_san, mock_vuuid, mock_goru):
- img = powervm.TEST_IMAGE1
-
- mock_crt_lu.return_value = self.ssp_drv._ssp, 'boot_lu'
- mock_san.return_value = 'disk_name'
- mock_vuuid.return_value = ['vuuid']
-
- self.assertEqual('boot_lu', self.ssp_drv.create_disk_from_image(
- 'context', self.inst, img))
- mock_dl.assert_called_once_with('context', img.id)
- mock_san.assert_has_calls([
- mock.call(img.name, prefix='image_', suffix='_' + img.checksum),
- mock.call(self.inst.name, prefix='boot_')])
- mock_it2f.assert_called_once_with(mock_dl.return_value)
- mock_goru.assert_called_once_with(
- self.ssp_drv._tier, 'disk_name', 'vuuid',
- mock_it2f.return_value, img.size,
- upload_type=tsk_stg.UploadType.IO_STREAM)
- mock_crt_lu.assert_called_once_with(
- self.mock_get_tier.return_value, mock_san.return_value,
- self.inst.flavor.root_gb, typ=pvm_stg.LUType.DISK,
- clone=mock_goru.return_value)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- @mock.patch('pypowervm.wrappers.storage.LU', autospec=True)
- def test_connect_disk(self, mock_lu, mock_bldmap, mock_addmap,
- mock_vio_uuids):
- disk_info = mock.Mock()
- disk_info.configure_mock(name='dname', udid='dudid')
- mock_vio_uuids.return_value = [self.vio_wrap.uuid]
-
- def test_afs(add_func):
- # Verify the internal add_func
- self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
- mock_bldmap.assert_called_once_with(
- self.host_uuid, self.vio_wrap, self.pvm_uuid.return_value,
- mock_lu.bld_ref.return_value)
- mock_addmap.assert_called_once_with(
- self.vio_wrap, mock_bldmap.return_value)
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
-
- self.ssp_drv.attach_disk(self.inst, disk_info, self.mock_ftsk)
- mock_lu.bld_ref.assert_called_once_with(self.apt, 'dname', 'dudid')
- self.pvm_uuid.assert_called_once_with(self.inst)
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
-
- @mock.patch('pypowervm.tasks.storage.rm_tier_storage', autospec=True)
- def test_delete_disks(self, mock_rm_tstor):
- self.ssp_drv.delete_disks(['disk1', 'disk2'])
- mock_rm_tstor.assert_called_once_with(['disk1', 'disk2'],
- tier=self.ssp_drv._tier)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- def test_disconnect_disk(self, mock_bld_ftsk, mock_gmf, mock_rmmaps,
- mock_findmaps, mock_vio_uuids):
- mock_vio_uuids.return_value = [self.vio_wrap.uuid]
- mock_bld_ftsk.return_value = self.mock_ftsk
- lu1, lu2 = [mock.create_autospec(pvm_stg.LU, instance=True)] * 2
- # Two mappings have the same LU, to verify set behavior
- mock_findmaps.return_value = [
- mock.Mock(spec=pvm_vios.VSCSIMapping, backing_storage=lu)
- for lu in (lu1, lu2, lu1)]
-
- def test_afs(rm_func):
- # verify the internal rm_func
- self.assertEqual(mock_rmmaps.return_value, rm_func(self.vio_wrap))
- mock_rmmaps.assert_called_once_with(
- self.vio_wrap, self.pvm_uuid.return_value,
- match_func=mock_gmf.return_value)
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
-
- self.assertEqual(
- {lu1, lu2}, set(self.ssp_drv.detach_disk(self.inst)))
- mock_bld_ftsk.assert_called_once_with(
- self.apt, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
- self.pvm_uuid.assert_called_once_with(self.inst)
- mock_gmf.assert_called_once_with(pvm_stg.LU)
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id=self.pvm_uuid.return_value,
- match_func=mock_gmf.return_value)
- self.mock_ftsk.execute.assert_called_once_with()
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._disk_match_func')
- def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
- mock_vios):
- mock_vios.return_value = self.vio_wrap
-
- # No maps found
- mock_findmaps.return_value = None
- devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
- mock_vios.assert_called_once_with(
- self.apt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id=self.pvm_uuid.return_value,
- match_func=mock_match_fn.return_value)
- self.assertIsNone(devname)
-
- # Good map
- mock_lu = mock.Mock()
- mock_lu.server_adapter.backing_dev_name = 'devname'
- mock_findmaps.return_value = [mock_lu]
- devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
- self.assertEqual('devname', devname)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
- '_vios_uuids', new_callable=mock.PropertyMock)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
- def test_connect_instance_disk_to_mgmt(self, mock_add, mock_vio_get,
- mock_lw, mock_vio_uuids):
- inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
- mock_vio_uuids.return_value = [1, 2]
-
- # Test with two VIOSes, both of which contain the mapping
- mock_vio_get.side_effect = [vio1, vio2]
- lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
- self.assertEqual('lu_udid', lu.udid)
- # Should hit on the first VIOS
- self.assertIs(vio1, vios)
- mock_add.assert_called_once_with(self.host_uuid, vio1, 'mp_uuid', lu)
-
- # Now the first VIOS doesn't have the mapping, but the second does
- mock_add.reset_mock()
- mock_vio_get.side_effect = [vio3, vio2]
- lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
- self.assertEqual('lu_udid', lu.udid)
- # Should hit on the second VIOS
- self.assertIs(vio2, vios)
- self.assertEqual(1, mock_add.call_count)
- mock_add.assert_called_once_with(self.host_uuid, vio2, 'mp_uuid', lu)
-
- # No hits
- mock_add.reset_mock()
- mock_vio_get.side_effect = [vio3, vio3]
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ssp_drv.connect_instance_disk_to_mgmt, inst)
- self.assertEqual(0, mock_add.call_count)
-
- # First add_vscsi_mapping call raises
- mock_vio_get.side_effect = [vio1, vio2]
- mock_add.side_effect = [Exception("mapping failed"), None]
- # Should hit on the second VIOS
- self.assertIs(vio2, vios)
-
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping', autospec=True)
- def test_disconnect_disk_from_mgmt(self, mock_rm_lu_map):
- self.ssp_drv.disconnect_disk_from_mgmt('vios_uuid', 'disk_name')
- mock_rm_lu_map.assert_called_with(self.apt, 'vios_uuid',
- 'mp_uuid', disk_names=['disk_name'])
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._get_disk_name')
- def test_disk_match_func(self, mock_disk_name, mock_gen_match):
- mock_disk_name.return_value = 'disk_name'
- self.ssp_drv._disk_match_func('disk_type', 'instance')
- mock_disk_name.assert_called_once_with('disk_type', 'instance')
- mock_gen_match.assert_called_with(pvm_stg.LU, names=['disk_name'])
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
- '_vios_uuids', new_callable=mock.PropertyMock)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
- inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
- mock_vio_uuids.return_value = [1, 2]
-
- # Test with two VIOSes, both of which contain the mapping. Force the
- # method to get the lpar_wrap.
- mock_vio_get.side_effect = [vio1, vio2]
- idi = self.ssp_drv._get_bootdisk_iter(inst)
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios1', vios.name)
- mock_vio_get.assert_called_once_with(self.apt, uuid=1,
- xag=[pvm_const.XAG.VIO_SMAP])
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios2', vios.name)
- mock_vio_get.assert_called_with(self.apt, uuid=2,
- xag=[pvm_const.XAG.VIO_SMAP])
- self.assertRaises(StopIteration, next, idi)
- self.assertEqual(2, mock_vio_get.call_count)
- mock_lw.assert_called_once_with(self.apt, inst)
-
- # Same, but prove that breaking out of the loop early avoids the second
- # get call. Supply lpar_wrap from here on, and prove no calls to
- # get_instance_wrapper
- mock_vio_get.reset_mock()
- mock_lw.reset_mock()
- mock_vio_get.side_effect = [vio1, vio2]
- for lu, vios in self.ssp_drv._get_bootdisk_iter(inst):
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios1', vios.name)
- break
- mock_vio_get.assert_called_once_with(self.apt, uuid=1,
- xag=[pvm_const.XAG.VIO_SMAP])
-
- # Now the first VIOS doesn't have the mapping, but the second does
- mock_vio_get.reset_mock()
- mock_vio_get.side_effect = [vio3, vio2]
- idi = self.ssp_drv._get_bootdisk_iter(inst)
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios2', vios.name)
- mock_vio_get.assert_has_calls(
- [mock.call(self.apt, uuid=uuid, xag=[pvm_const.XAG.VIO_SMAP])
- for uuid in (1, 2)])
- self.assertRaises(StopIteration, next, idi)
- self.assertEqual(2, mock_vio_get.call_count)
-
- # No hits
- mock_vio_get.reset_mock()
- mock_vio_get.side_effect = [vio3, vio3]
- self.assertEqual([], list(self.ssp_drv._get_bootdisk_iter(inst)))
- self.assertEqual(2, mock_vio_get.call_count)
-
- def _bld_mocks_for_instance_disk(self):
- inst = mock.Mock()
- inst.name = 'my-instance-name'
- lpar_wrap = mock.Mock()
- lpar_wrap.id = 4
- lu_wrap = mock.Mock(spec=pvm_stg.LU)
- lu_wrap.configure_mock(name='boot_my_instance_name', udid='lu_udid')
- smap = mock.Mock(backing_storage=lu_wrap,
- server_adapter=mock.Mock(lpar_id=4))
- # Build mock VIOS Wrappers as the returns from VIOS.wrap.
- # vios1 and vios2 will both have the mapping for client ID 4 and LU
- # named boot_my_instance_name.
- smaps = [mock.Mock(), mock.Mock(), mock.Mock(), smap]
- vios1 = mock.Mock(spec=pvm_vios.VIOS)
- vios1.configure_mock(name='vios1', uuid='uuid1', scsi_mappings=smaps)
- vios2 = mock.Mock(spec=pvm_vios.VIOS)
- vios2.configure_mock(name='vios2', uuid='uuid2', scsi_mappings=smaps)
- # vios3 will not have the mapping
- vios3 = mock.Mock(spec=pvm_vios.VIOS)
- vios3.configure_mock(name='vios3', uuid='uuid3',
- scsi_mappings=[mock.Mock(), mock.Mock()])
- return inst, lpar_wrap, vios1, vios2, vios3
diff --git a/nova/tests/unit/virt/powervm/tasks/test_image.py b/nova/tests/unit/virt/powervm/tasks/test_image.py
deleted file mode 100644
index b9e3560a16..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_image.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-
-from nova.virt.powervm.tasks import image as tsk_img
-
-
-class TestImage(test.TestCase):
- def test_update_task_state(self):
- def func(task_state, expected_state='delirious'):
- self.assertEqual('task_state', task_state)
- self.assertEqual('delirious', expected_state)
- tf = tsk_img.UpdateTaskState(func, 'task_state')
- self.assertEqual('update_task_state_task_state', tf.name)
- tf.execute()
-
- def func2(task_state, expected_state=None):
- self.assertEqual('task_state', task_state)
- self.assertEqual('expected_state', expected_state)
- tf = tsk_img.UpdateTaskState(func2, 'task_state',
- expected_state='expected_state')
- tf.execute()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tsk_img.UpdateTaskState(func, 'task_state')
- tf.assert_called_once_with(
- name='update_task_state_task_state')
-
- @mock.patch('nova.virt.powervm.image.stream_blockdev_to_glance',
- autospec=True)
- @mock.patch('nova.virt.powervm.image.generate_snapshot_metadata',
- autospec=True)
- def test_stream_to_glance(self, mock_metadata, mock_stream):
- mock_metadata.return_value = 'metadata'
- mock_inst = mock.Mock()
- mock_inst.name = 'instance_name'
- tf = tsk_img.StreamToGlance('context', 'image_api', 'image_id',
- mock_inst)
- self.assertEqual('stream_to_glance', tf.name)
- tf.execute('disk_path')
- mock_metadata.assert_called_with('context', 'image_api', 'image_id',
- mock_inst)
- mock_stream.assert_called_with('context', 'image_api', 'image_id',
- 'metadata', 'disk_path')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tsk_img.StreamToGlance(
- 'context', 'image_api', 'image_id', mock_inst)
- tf.assert_called_once_with(
- name='stream_to_glance', requires='disk_path')
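
The recurring `with mock.patch('taskflow.task.Task.__init__') as tf:` blocks above work because the subclass's own __init__ still runs and forwards to the patched base constructor, whose arguments can then be asserted. A minimal sketch of the same trick; BaseTask and UpdateState are hypothetical stand-ins, not the deleted code:

    from unittest import mock


    class BaseTask(object):            # stand-in for taskflow.task.Task
        def __init__(self, name=None, provides=None, requires=None):
            self.name = name


    class UpdateState(BaseTask):       # stand-in for a task subclass
        def __init__(self, state):
            super(UpdateState, self).__init__(name='update_%s' % state)


    # return_value=None keeps the patched __init__ harmless; the
    # subclass __init__ runs unpatched and calls up into the mock.
    with mock.patch.object(BaseTask, '__init__', return_value=None) as init:
        UpdateState('task_state')
    init.assert_called_once_with(name='update_task_state')
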
diff --git a/nova/tests/unit/virt/powervm/tasks/test_network.py b/nova/tests/unit/virt/powervm/tasks/test_network.py
deleted file mode 100644
index 9d6951eceb..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_network.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import eventlet
-import mock
-from pypowervm.wrappers import network as pvm_net
-
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm.tasks import network as tf_net
-
-
-def cna(mac):
- """Builds a mock Client Network Adapter for unit tests."""
- return mock.MagicMock(mac=mac, vswitch_uri='fake_href')
-
-
-class TestNetwork(test.NoDBTestCase):
- def setUp(self):
- super(TestNetwork, self).setUp()
- self.flags(host='host1')
- self.apt = mock.Mock()
-
- self.mock_lpar_wrap = mock.MagicMock()
- self.mock_lpar_wrap.can_modify_io.return_value = True, None
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('nova.virt.powervm.vif.unplug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug_vifs(self, mock_vm_get, mock_unplug, mock_get_wrap):
- """Tests that a delete of the vif can be done."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA responses.
- cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
- mock_vm_get.return_value = cnas
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
- {'address': 'aa:bb:cc:dd:ee:33'}
- ]
-
- # Mock out the instance wrapper
- mock_get_wrap.return_value = self.mock_lpar_wrap
-
- # Mock out the vif driver
- def validate_unplug(adapter, instance, vif, cna_w_list=None):
- self.assertEqual(adapter, self.apt)
- self.assertEqual(instance, inst)
- self.assertIn(vif, net_info)
- self.assertEqual(cna_w_list, cnas)
-
- mock_unplug.side_effect = validate_unplug
-
- # Run method
- p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info)
- p_vifs.execute()
-
- # Make sure the unplug was invoked, so that we know that the validation
- # code was called
- self.assertEqual(3, mock_unplug.call_count)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.UnplugVifs(self.apt, inst, net_info)
- tf.assert_called_once_with(name='unplug_vifs')
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_unplug_vifs_invalid_state(self, mock_get_wrap):
- """Tests that the delete raises an exception if bad VM state."""
- inst = powervm.TEST_INSTANCE
-
- # Mock out the instance wrapper
- mock_get_wrap.return_value = self.mock_lpar_wrap
-
- # Mock that the state is incorrect
- self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
-
- # Run method
- p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock())
- self.assertRaises(exception.VirtualInterfaceUnplugException,
- p_vifs.execute)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_rmc(self, mock_cna_get, mock_plug):
- """Tests that a crt vif can be done with secure RMC."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. One should already exist, the other
- # should not.
- pre_cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
- mock_cna_get.return_value = copy.deepcopy(pre_cnas)
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
- ]
-
- # First run the CNA update, then the CNA create.
- mock_new_cna = mock.Mock(spec=pvm_net.CNA)
- mock_plug.side_effect = ['upd_cna', mock_new_cna]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
-
- all_cnas = p_vifs.execute(self.mock_lpar_wrap)
-
- # plug is called twice: an update for the existing vif, a create for
- # the new one.
- mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
- mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=True)
-
- # The Task provides the list of original CNAs plus only CNAs that were
- # created.
- self.assertEqual(pre_cnas + [mock_new_cna], all_cnas)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- tf.assert_called_once_with(
- name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_plug):
- """Verifies if no creates are needed, none are done."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Both should already exist.
- mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case. This also validates that we don't call
- # get_vnics if no nets have vnic_type 'direct'.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:11', 'vnic_type': 'normal'}
- ]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.execute(self.mock_lpar_wrap)
-
- # The create should have been called with new_vif as False.
- mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
- mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=False)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_invalid_state(self, mock_vm_get, mock_plug):
- """Tests that a crt_vif fails when the LPAR state is bad."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Only doing one for simplicity
- mock_vm_get.return_value = []
- net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
-
- # Mock that the state is incorrect
- self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- self.assertRaises(exception.VirtualInterfaceCreateException,
- p_vifs.execute, self.mock_lpar_wrap)
-
- # The create should not have been invoked
- self.assertEqual(0, mock_plug.call_count)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_timeout(self, mock_vm_get, mock_plug):
- """Tests that crt vif failure via loss of neutron callback."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Only doing one for simplicity
- mock_vm_get.return_value = [cna('AABBCCDDEE11')]
-
- # Mock up the network info.
- net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
-
- # Ensure that an exception is raised by a timeout.
- mock_plug.side_effect = eventlet.timeout.Timeout()
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- self.assertRaises(exception.VirtualInterfaceCreateException,
- p_vifs.execute, self.mock_lpar_wrap)
-
- # The create should have only been called once.
- self.assertEqual(1, mock_plug.call_count)
-
- @mock.patch('nova.virt.powervm.vif.unplug')
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_revert(self, mock_vm_get, mock_plug, mock_unplug):
- """Tests that the revert flow works properly."""
- inst = powervm.TEST_INSTANCE
-
- # Fake CNA list. The one pre-existing VIF should *not* get reverted.
- cna_list = [cna('AABBCCDDEEFF'), cna('FFEEDDCCBBAA')]
- mock_vm_get.return_value = cna_list
-
- # Mock up the network info. Three roll backs.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'normal'}
- ]
-
- # Make sure we test raising an exception
- mock_unplug.side_effect = [exception.NovaException(), None]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.execute(self.mock_lpar_wrap)
- p_vifs.revert(self.mock_lpar_wrap, mock.Mock(), mock.Mock())
-
- # The unplug should be called twice. The exception shouldn't stop the
- # second call.
- self.assertEqual(2, mock_unplug.call_count)
-
- # Make sure each call is invoked correctly. The first plug was not a
- # new vif, so it should not be reverted.
- c2 = mock.call(self.apt, inst, net_info[1], cna_w_list=cna_list)
- c3 = mock.call(self.apt, inst, net_info[2], cna_w_list=cna_list)
- mock_unplug.assert_has_calls([c2, c3])
-
- @mock.patch('pypowervm.tasks.cna.crt_cna')
- @mock.patch('pypowervm.wrappers.network.VSwitch.search')
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_mgmt_vif(self, mock_vm_get, mock_plug, mock_vs_search,
- mock_crt_cna):
- """Tests that a mgmt vif can be created."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the rmc vswitch
- vswitch_w = mock.MagicMock()
- vswitch_w.href = 'fake_mgmt_uri'
- mock_vs_search.return_value = [vswitch_w]
-
- # Run method such that it triggers a fresh CNA search
- p_vifs = tf_net.PlugMgmtVif(self.apt, inst)
- p_vifs.execute(None)
-
- # With the default get_cnas mock (which returns a Mock()), we think we
- # found an existing management CNA.
- mock_crt_cna.assert_not_called()
- mock_vm_get.assert_called_once_with(
- self.apt, inst, vswitch_uri='fake_mgmt_uri')
-
- # Now mock get_cnas to return no hits
- mock_vm_get.reset_mock()
- mock_vm_get.return_value = []
- p_vifs.execute(None)
-
- # Get was called; and since it didn't find the mgmt CNA, plug was
- # called too.
- self.assertEqual(1, mock_crt_cna.call_count)
- mock_vm_get.assert_called_once_with(
- self.apt, inst, vswitch_uri='fake_mgmt_uri')
-
- # Now pass CNAs, but not the mgmt vif, "from PlugVifs"
- cnas = [mock.Mock(vswitch_uri='uri1'), mock.Mock(vswitch_uri='uri2')]
- mock_crt_cna.reset_mock()
- mock_vm_get.reset_mock()
- p_vifs.execute(cnas)
-
- # Get wasn't called, since the CNAs were passed "from PlugVifs"; but
- # since the mgmt vif wasn't included, plug was called.
- mock_vm_get.assert_not_called()
- mock_crt_cna.assert_called()
-
- # Finally, pass CNAs including the mgmt.
- cnas.append(mock.Mock(vswitch_uri='fake_mgmt_uri'))
- mock_crt_cna.reset_mock()
- p_vifs.execute(cnas)
-
- # Neither get nor plug was called.
- mock_vm_get.assert_not_called()
- mock_crt_cna.assert_not_called()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.PlugMgmtVif(self.apt, inst)
- tf.assert_called_once_with(
- name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
-
- def test_get_vif_events(self):
- # Set up common mocks.
- inst = powervm.TEST_INSTANCE
- net_info = [mock.MagicMock(), mock.MagicMock()]
- net_info[0]['id'] = 'a'
- net_info[0].get.return_value = False
- net_info[1]['id'] = 'b'
- net_info[1].get.return_value = True
-
- # Set up the runner.
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.crt_network_infos = net_info
- resp = p_vifs._get_vif_events()
-
- # Only one should be returned since only one was active.
- self.assertEqual(1, len(resp))
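
The revert test above pins down two behaviours that are easy to get wrong: only vifs the task itself created are rolled back, and one failed unplug must not abort the remaining ones. A sketch of that shape (PlugVifsSketch is hypothetical, not the deleted implementation):

    from unittest import mock


    class PlugVifsSketch(object):
        """Sketch of a plug task whose revert only undoes its own work."""

        def __init__(self, driver):
            self.driver = driver
            self.crt_vifs = []           # only vifs this task created

        def execute(self, wanted, existing):
            for vif in wanted:
                if vif not in existing:
                    self.driver.plug(vif)
                    self.crt_vifs.append(vif)

        def revert(self, *args):
            for vif in self.crt_vifs:
                try:
                    self.driver.unplug(vif)
                except Exception:
                    pass                 # best effort; keep unplugging


    drv = mock.Mock()
    task = PlugVifsSketch(drv)
    task.execute(['a', 'b', 'c'], existing=['a'])
    drv.unplug.side_effect = [RuntimeError(), None]
    task.revert()
    assert drv.unplug.call_count == 2    # the failure didn't stop the loop
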
diff --git a/nova/tests/unit/virt/powervm/tasks/test_storage.py b/nova/tests/unit/virt/powervm/tasks/test_storage.py
deleted file mode 100644
index 39fe9dec72..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_storage.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import exceptions as pvm_exc
-
-from nova import exception
-from nova import test
-from nova.virt.powervm.tasks import storage as tf_stg
-
-
-class TestStorage(test.NoDBTestCase):
-
- def setUp(self):
- super(TestStorage, self).setUp()
-
- self.adapter = mock.Mock()
- self.disk_dvr = mock.MagicMock()
- self.mock_cfg_drv = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.media.ConfigDrivePowerVM')).mock
- self.mock_mb = self.mock_cfg_drv.return_value
- self.instance = mock.MagicMock()
- self.context = 'context'
-
- def test_create_and_connect_cfg_drive(self):
- # With a specified FeedTask
- task = tf_stg.CreateAndConnectCfgDrive(
- self.adapter, self.instance, 'injected_files',
- 'network_info', 'stg_ftsk', admin_pass='admin_pass')
- task.execute('mgmt_cna')
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.create_cfg_drv_vopt.assert_called_once_with(
- self.instance, 'injected_files', 'network_info', 'stg_ftsk',
- admin_pass='admin_pass', mgmt_cna='mgmt_cna')
-
- # Normal revert
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_called_once_with(self.instance,
- 'stg_ftsk')
-
- self.mock_mb.reset_mock()
-
- # Revert when dlt_vopt fails
- self.mock_mb.dlt_vopt.side_effect = pvm_exc.Error('fake-exc')
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_called_once()
-
- self.mock_mb.reset_mock()
-
- # Revert when media builder not created
- task.mb = None
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_not_called()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.CreateAndConnectCfgDrive(
- self.adapter, self.instance, 'injected_files',
- 'network_info', 'stg_ftsk', admin_pass='admin_pass')
- tf.assert_called_once_with(name='cfg_drive', requires=['mgmt_cna'])
-
- def test_delete_vopt(self):
- # Test with no FeedTask
- task = tf_stg.DeleteVOpt(self.adapter, self.instance)
- task.execute()
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.dlt_vopt.assert_called_once_with(
- self.instance, stg_ftsk=None)
-
- self.mock_cfg_drv.reset_mock()
- self.mock_mb.reset_mock()
-
- # With a specified FeedTask
- task = tf_stg.DeleteVOpt(self.adapter, self.instance, stg_ftsk='ftsk')
- task.execute()
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.dlt_vopt.assert_called_once_with(
- self.instance, stg_ftsk='ftsk')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DeleteVOpt(self.adapter, self.instance)
- tf.assert_called_once_with(name='vopt_delete')
-
- def test_delete_disk(self):
- stor_adpt_mappings = mock.Mock()
-
- task = tf_stg.DeleteDisk(self.disk_dvr)
- task.execute(stor_adpt_mappings)
- self.disk_dvr.delete_disks.assert_called_once_with(stor_adpt_mappings)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DeleteDisk(self.disk_dvr)
- tf.assert_called_once_with(
- name='delete_disk', requires=['stor_adpt_mappings'])
-
- def test_detach_disk(self):
- task = tf_stg.DetachDisk(self.disk_dvr, self.instance)
- task.execute()
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DetachDisk(self.disk_dvr, self.instance)
- tf.assert_called_once_with(
- name='detach_disk', provides='stor_adpt_mappings')
-
- def test_attach_disk(self):
- stg_ftsk = mock.Mock()
- disk_dev_info = mock.Mock()
-
- task = tf_stg.AttachDisk(self.disk_dvr, self.instance, stg_ftsk)
- task.execute(disk_dev_info)
- self.disk_dvr.attach_disk.assert_called_once_with(
- self.instance, disk_dev_info, stg_ftsk)
-
- task.revert(disk_dev_info, 'result', 'flow failures')
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- self.disk_dvr.detach_disk.reset_mock()
-
- # Revert failures are not raised
- self.disk_dvr.detach_disk.side_effect = pvm_exc.TimeoutError(
- "timed out")
- task.revert(disk_dev_info, 'result', 'flow failures')
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.AttachDisk(self.disk_dvr, self.instance, stg_ftsk)
- tf.assert_called_once_with(
- name='attach_disk', requires=['disk_dev_info'])
-
- def test_create_disk_for_img(self):
- image_meta = mock.Mock()
-
- task = tf_stg.CreateDiskForImg(
- self.disk_dvr, self.context, self.instance, image_meta)
- task.execute()
- self.disk_dvr.create_disk_from_image.assert_called_once_with(
- self.context, self.instance, image_meta)
-
- task.revert('result', 'flow failures')
- self.disk_dvr.delete_disks.assert_called_once_with(['result'])
-
- self.disk_dvr.delete_disks.reset_mock()
-
- # Delete not called if no result
- task.revert(None, None)
- self.disk_dvr.delete_disks.assert_not_called()
-
- # Delete exception doesn't raise
- self.disk_dvr.delete_disks.side_effect = pvm_exc.TimeoutError(
- "timed out")
- task.revert('result', 'flow failures')
- self.disk_dvr.delete_disks.assert_called_once_with(['result'])
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.CreateDiskForImg(
- self.disk_dvr, self.context, self.instance, image_meta)
- tf.assert_called_once_with(
- name='create_disk_from_img', provides='disk_dev_info')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.mgmt.discover_vscsi_disk', autospec=True)
- @mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
- def test_instance_disk_to_mgmt(self, mock_rm, mock_discover, mock_find):
- mock_discover.return_value = '/dev/disk'
- mock_instance = mock.Mock()
- mock_instance.name = 'instance_name'
- mock_stg = mock.Mock()
- mock_stg.name = 'stg_name'
- mock_vwrap = mock.Mock()
- mock_vwrap.name = 'vios_name'
- mock_vwrap.uuid = 'vios_uuid'
- mock_vwrap.scsi_mappings = ['mapping1']
-
- disk_dvr = mock.MagicMock()
- disk_dvr.mp_uuid = 'mp_uuid'
- disk_dvr.connect_instance_disk_to_mgmt.return_value = (mock_stg,
- mock_vwrap)
-
- def reset_mocks():
- mock_find.reset_mock()
- mock_discover.reset_mock()
- mock_rm.reset_mock()
- disk_dvr.reset_mock()
-
- # Good path - find_maps returns one result
- mock_find.return_value = ['one_mapping']
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual('instance_disk_to_mgmt', tf.name)
- self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- mock_discover.assert_called_with('one_mapping')
- tf.revert('result', 'failures')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Good path - find_maps returns >1 result
- reset_mocks()
- mock_find.return_value = ['first_mapping', 'second_mapping']
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- mock_discover.assert_called_with('first_mapping')
- tf.revert('result', 'failures')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Management Partition is VIOS and NovaLink hosted storage
- reset_mocks()
- disk_dvr._vios_uuids = ['mp_uuid']
- dev_name = '/dev/vg/fake_name'
- disk_dvr.get_bootdisk_path.return_value = dev_name
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual((None, None, dev_name), tf.execute())
-
- # Management Partition is VIOS and not NovaLink hosted storage
- reset_mocks()
- disk_dvr._vios_uuids = ['mp_uuid']
- disk_dvr.get_bootdisk_path.return_value = None
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- tf.execute()
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
-
- # Bad path - find_maps returns no results
- reset_mocks()
- mock_find.return_value = []
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertRaises(exception.NewMgmtMappingNotFoundException,
- tf.execute)
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- # find_maps was still called
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- # discover_vscsi_disk didn't get called
- self.assertEqual(0, mock_discover.call_count)
- tf.revert('result', 'failures')
- # disconnect_disk_from_mgmt got called
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- # ...but remove_block_dev did not.
- self.assertEqual(0, mock_rm.call_count)
-
- # Bad path - connect raises
- reset_mocks()
- disk_dvr.connect_instance_disk_to_mgmt.side_effect = (
- exception.InstanceDiskMappingFailed(instance_name='inst_name'))
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertRaises(exception.InstanceDiskMappingFailed, tf.execute)
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- self.assertEqual(0, mock_find.call_count)
- self.assertEqual(0, mock_discover.call_count)
- # revert shouldn't call disconnect or remove
- tf.revert('result', 'failures')
- self.assertEqual(0, disk_dvr.disconnect_disk_from_mgmt.call_count)
- self.assertEqual(0, mock_rm.call_count)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- tf.assert_called_once_with(
- name='instance_disk_to_mgmt',
- provides=['stg_elem', 'vios_wrap', 'disk_path'])
-
- @mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
- def test_remove_instance_disk_from_mgmt(self, mock_rm):
- disk_dvr = mock.MagicMock()
- mock_instance = mock.Mock()
- mock_instance.name = 'instance_name'
- mock_stg = mock.Mock()
- mock_stg.name = 'stg_name'
- mock_vwrap = mock.Mock()
- mock_vwrap.name = 'vios_name'
- mock_vwrap.uuid = 'vios_uuid'
-
- tf = tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
- self.assertEqual('remove_inst_disk_from_mgmt', tf.name)
-
- # Boot disk not mapped to mgmt partition
- tf.execute(None, mock_vwrap, '/dev/disk')
- self.assertEqual(0, disk_dvr.disconnect_disk_from_mgmt.call_count)
- self.assertEqual(0, mock_rm.call_count)
-
- # Boot disk mapped to mgmt partition
- tf.execute(mock_stg, mock_vwrap, '/dev/disk')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
- tf.assert_called_once_with(
- name='remove_inst_disk_from_mgmt',
- requires=['stg_elem', 'vios_wrap', 'disk_path'])
-
- def test_attach_volume(self):
- vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
-
- task = tf_stg.AttachVolume(vol_dvr)
- task.execute()
- vol_dvr.attach_volume.assert_called_once_with()
-
- task.revert('result', 'flow failures')
- vol_dvr.reset_stg_ftsk.assert_called_once_with()
- vol_dvr.detach_volume.assert_called_once_with()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.AttachVolume(vol_dvr)
- tf.assert_called_once_with(name='attach_vol_1')
-
- def test_detach_volume(self):
- vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
-
- task = tf_stg.DetachVolume(vol_dvr)
- task.execute()
- vol_dvr.detach_volume.assert_called_once_with()
-
- task.revert('result', 'flow failures')
- vol_dvr.reset_stg_ftsk.assert_called_once_with()
- vol_dvr.detach_volume.assert_called_once_with()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DetachVolume(vol_dvr)
- tf.assert_called_once_with(name='detach_vol_1')
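
The setUp above uses fixtures.MockPatch with useFixture rather than mock.patch decorators, so the patch teardown is tied to the test's fixture lifecycle. A minimal sketch of the pattern, assuming only the fixtures and testtools packages:

    import time

    import fixtures
    import testtools


    class ExampleTest(testtools.TestCase):
        def setUp(self):
            super(ExampleTest, self).setUp()
            # .mock is the MagicMock replacing the target; the patch is
            # unwound automatically when the test and its fixtures end.
            self.mock_time = self.useFixture(
                fixtures.MockPatch('time.time')).mock

        def test_patched(self):
            self.mock_time.return_value = 1234.0
            self.assertEqual(1234.0, time.time())
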
diff --git a/nova/tests/unit/virt/powervm/tasks/test_vm.py b/nova/tests/unit/virt/powervm/tasks/test_vm.py
deleted file mode 100644
index fc68646acf..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_vm.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from taskflow import engines as tf_eng
-from taskflow.patterns import linear_flow as tf_lf
-from taskflow import task as tf_tsk
-
-from nova import exception
-from nova import test
-from nova.virt.powervm.tasks import vm as tf_vm
-
-
-class TestVMTasks(test.NoDBTestCase):
- def setUp(self):
- super(TestVMTasks, self).setUp()
- self.apt = mock.Mock()
- self.instance = mock.Mock()
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- def test_get(self, mock_get_wrap):
- get = tf_vm.Get(self.apt, self.instance)
- get.execute()
- mock_get_wrap.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Get(self.apt, self.instance)
- tf.assert_called_once_with(name='get_vm', provides='lpar_wrap')
-
- @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.create_lpar')
- def test_create(self, mock_vm_crt, mock_stg):
- lpar_entry = mock.Mock()
-
- # Test create with normal (non-recreate) ftsk
- crt = tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'ftsk')
- mock_vm_crt.return_value = lpar_entry
- crt.execute()
-
- mock_vm_crt.assert_called_once_with(self.apt, 'host_wrapper',
- self.instance)
-
- mock_stg.assert_called_once_with(
- [lpar_entry.id], 'ftsk', lpars_exist=True)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'ftsk')
- tf.assert_called_once_with(name='crt_vm', provides='lpar_wrap')
-
- @mock.patch('nova.virt.powervm.vm.power_on')
- def test_power_on(self, mock_pwron):
- pwron = tf_vm.PowerOn(self.apt, self.instance)
- pwron.execute()
- mock_pwron.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.PowerOn(self.apt, self.instance)
- tf.assert_called_once_with(name='pwr_vm')
-
- @mock.patch('nova.virt.powervm.vm.power_on')
- @mock.patch('nova.virt.powervm.vm.power_off')
- def test_power_on_revert(self, mock_pwroff, mock_pwron):
- flow = tf_lf.Flow('revert_power_on')
- pwron = tf_vm.PowerOn(self.apt, self.instance)
- flow.add(pwron)
-
- # Dummy Task that fails, triggering flow revert
- def failure(*a, **k):
- raise ValueError()
- flow.add(tf_tsk.FunctorTask(failure))
-
- # When PowerOn.execute doesn't fail, revert calls power_off
- self.assertRaises(ValueError, tf_eng.run, flow)
- mock_pwron.assert_called_once_with(self.apt, self.instance)
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=True)
-
- mock_pwron.reset_mock()
- mock_pwroff.reset_mock()
-
- # When PowerOn.execute fails, revert doesn't call power_off
- mock_pwron.side_effect = exception.NovaException()
- self.assertRaises(exception.NovaException, tf_eng.run, flow)
- mock_pwron.assert_called_once_with(self.apt, self.instance)
- mock_pwroff.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vm.power_off')
- def test_power_off(self, mock_pwroff):
- # Default force_immediate
- pwroff = tf_vm.PowerOff(self.apt, self.instance)
- pwroff.execute()
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=False)
-
- mock_pwroff.reset_mock()
-
- # Explicit force_immediate
- pwroff = tf_vm.PowerOff(self.apt, self.instance, force_immediate=True)
- pwroff.execute()
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=True)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.PowerOff(self.apt, self.instance)
- tf.assert_called_once_with(name='pwr_off_vm')
-
- @mock.patch('nova.virt.powervm.vm.delete_lpar')
- def test_delete(self, mock_dlt):
- delete = tf_vm.Delete(self.apt, self.instance)
- delete.execute()
- mock_dlt.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Delete(self.apt, self.instance)
- tf.assert_called_once_with(name='dlt_vm')
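
test_power_on_revert above runs a real taskflow engine and forces the revert path with a deliberately failing FunctorTask. A self-contained sketch of that technique (PowerOnSketch is illustrative; the prints stand in for the mocked power_on/power_off calls):

    from taskflow import engines as tf_eng
    from taskflow.patterns import linear_flow as tf_lf
    from taskflow import task as tf_tsk


    class PowerOnSketch(tf_tsk.Task):
        def execute(self):
            print('power on')

        def revert(self, *args, **kwargs):
            print('power off (revert)')


    def boom(*args, **kwargs):
        raise ValueError('force the flow to revert')


    flow = tf_lf.Flow('revert_demo')
    flow.add(PowerOnSketch(name='pwr'))
    flow.add(tf_tsk.FunctorTask(boom, name='boom'))

    try:
        tf_eng.run(flow)        # reverts 'pwr' before re-raising
    except ValueError:
        pass
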
diff --git a/nova/tests/unit/virt/powervm/test_driver.py b/nova/tests/unit/virt/powervm/test_driver.py
deleted file mode 100644
index 025d823d15..0000000000
--- a/nova/tests/unit/virt/powervm/test_driver.py
+++ /dev/null
@@ -1,649 +0,0 @@
-# Copyright 2016, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import fixtures
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils.fixture import uuidsentinel as uuids
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_hlp_log
-from pypowervm.helpers import vios_busy as pvm_hlp_vbusy
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import block_device as nova_block_device
-from nova.compute import provider_tree
-from nova import conf as cfg
-from nova import exception
-from nova.objects import block_device as bdmobj
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt import block_device as nova_virt_bdm
-from nova.virt import driver as nova_driver
-from nova.virt.driver import ComputeDriver
-from nova.virt import hardware
-from nova.virt.powervm.disk import ssp
-from nova.virt.powervm import driver
-
-CONF = cfg.CONF
-
-
-class TestPowerVMDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestPowerVMDriver, self).setUp()
- self.drv = driver.PowerVMDriver('virtapi')
- self.adp = self.useFixture(fixtures.MockPatch(
- 'pypowervm.adapter.Adapter', autospec=True)).mock
- self.drv.adapter = self.adp
- self.sess = self.useFixture(fixtures.MockPatch(
- 'pypowervm.adapter.Session', autospec=True)).mock
-
- self.pwron = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.power_on')).mock
- self.pwroff = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.power_off')).mock
-
- # Create an instance to test with
- self.inst = powervm.TEST_INSTANCE
-
- def test_driver_capabilities(self):
- """Test the driver capabilities."""
- # check that the driver reports all capabilities
- self.assertEqual(set(ComputeDriver.capabilities),
- set(self.drv.capabilities))
- # check the values for each capability
- self.assertFalse(self.drv.capabilities['has_imagecache'])
- self.assertFalse(self.drv.capabilities['supports_evacuate'])
- self.assertFalse(
- self.drv.capabilities['supports_migrate_to_same_host'])
- self.assertTrue(self.drv.capabilities['supports_attach_interface'])
- self.assertFalse(self.drv.capabilities['supports_device_tagging'])
- self.assertFalse(
- self.drv.capabilities['supports_tagged_attach_interface'])
- self.assertFalse(
- self.drv.capabilities['supports_tagged_attach_volume'])
- self.assertTrue(self.drv.capabilities['supports_extend_volume'])
- self.assertFalse(self.drv.capabilities['supports_multiattach'])
-
- @mock.patch('nova.image.glance.API')
- @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
- @mock.patch('oslo_utils.importutils.import_object_ns', autospec=True)
- @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
- @mock.patch('pypowervm.tasks.partition.validate_vios_ready', autospec=True)
- def test_init_host(self, mock_vvr, mock_sys, mock_import, mock_scrub,
- mock_img):
- mock_hostw = mock.Mock(uuid='uuid')
- mock_sys.get.return_value = [mock_hostw]
- self.drv.init_host('host')
- self.sess.assert_called_once_with(conn_tries=60)
- self.adp.assert_called_once_with(
- self.sess.return_value, helpers=[
- pvm_hlp_log.log_helper, pvm_hlp_vbusy.vios_busy_retry_helper])
- mock_vvr.assert_called_once_with(self.drv.adapter)
- mock_sys.get.assert_called_once_with(self.drv.adapter)
- self.assertEqual(mock_hostw, self.drv.host_wrapper)
- mock_scrub.assert_called_once_with(self.drv.adapter)
- mock_scrub.return_value.execute.assert_called_once_with()
- mock_import.assert_called_once_with(
- 'nova.virt.powervm.disk', 'localdisk.LocalStorage',
- self.drv.adapter, 'uuid')
- self.assertEqual(mock_import.return_value, self.drv.disk_dvr)
- mock_img.assert_called_once_with()
- self.assertEqual(mock_img.return_value, self.drv.image_api)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('nova.virt.powervm.vm.get_vm_qp')
- @mock.patch('nova.virt.powervm.vm._translate_vm_state')
- def test_get_info(self, mock_tx_state, mock_qp, mock_uuid):
- mock_tx_state.return_value = 'fake-state'
- self.assertEqual(hardware.InstanceInfo('fake-state'),
- self.drv.get_info('inst'))
- mock_uuid.assert_called_once_with('inst')
- mock_qp.assert_called_once_with(
- self.drv.adapter, mock_uuid.return_value, 'PartitionState')
- mock_tx_state.assert_called_once_with(mock_qp.return_value)
-
- @mock.patch('nova.virt.powervm.vm.get_lpar_names')
- def test_list_instances(self, mock_names):
- mock_names.return_value = ['one', 'two', 'three']
- self.assertEqual(['one', 'two', 'three'], self.drv.list_instances())
- mock_names.assert_called_once_with(self.adp)
-
- def test_get_available_nodes(self):
- self.flags(host='hostname')
- self.assertEqual(['hostname'], self.drv.get_available_nodes('node'))
-
- @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
- @mock.patch('nova.virt.powervm.host.build_host_resource_from_ms')
- def test_get_available_resource(self, mock_bhrfm, mock_sys):
- mock_sys.get.return_value = ['sys']
- mock_bhrfm.return_value = {'foo': 'bar'}
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
- self.assertEqual(
- {'foo': 'bar', 'local_gb': self.drv.disk_dvr.capacity,
- 'local_gb_used': self.drv.disk_dvr.capacity_used},
- self.drv.get_available_resource('node'))
- mock_sys.get.assert_called_once_with(self.adp)
- mock_bhrfm.assert_called_once_with('sys')
- self.assertEqual('sys', self.drv.host_wrapper)
-
- @contextlib.contextmanager
- def _update_provider_tree(self, allocations=None):
- """Host resource dict gets converted properly to provider tree inv."""
-
- with mock.patch('nova.virt.powervm.host.'
- 'build_host_resource_from_ms') as mock_bhrfm:
- mock_bhrfm.return_value = {
- 'vcpus': 8,
- 'memory_mb': 2048,
- }
- self.drv.host_wrapper = 'host_wrapper'
- # Validate that this gets converted to int with floor
- self.drv.disk_dvr = mock.Mock(capacity=2091.8)
- exp_inv = {
- 'VCPU': {
- 'total': 8,
- 'max_unit': 8,
- 'allocation_ratio': 16.0,
- 'reserved': 0,
- },
- 'MEMORY_MB': {
- 'total': 2048,
- 'max_unit': 2048,
- 'allocation_ratio': 1.5,
- 'reserved': 512,
- },
- 'DISK_GB': {
- 'total': 2091,
- 'max_unit': 2091,
- 'allocation_ratio': 1.0,
- 'reserved': 0,
- },
- }
- ptree = provider_tree.ProviderTree()
- ptree.new_root('compute_host', uuids.cn)
- # Let the caller muck with these
- yield ptree, exp_inv
- self.drv.update_provider_tree(ptree, 'compute_host',
- allocations=allocations)
- self.assertEqual(exp_inv, ptree.data('compute_host').inventory)
- mock_bhrfm.assert_called_once_with('host_wrapper')
-
- def test_update_provider_tree(self):
- # Basic: no inventory already on the provider, no extra providers, no
- # aggregates or traits.
- with self._update_provider_tree():
- pass
-
- def test_update_provider_tree_ignore_allocations(self):
- with self._update_provider_tree(allocations="This is ignored"):
- pass
-
- def test_update_provider_tree_conf_overrides(self):
- # Non-default CONF values for allocation ratios and reserved.
- self.flags(cpu_allocation_ratio=12.3,
- reserved_host_cpus=4,
- ram_allocation_ratio=4.5,
- reserved_host_memory_mb=32,
- disk_allocation_ratio=6.7,
- # This gets int(ceil)'d
- reserved_host_disk_mb=5432.1)
- with self._update_provider_tree() as (_, exp_inv):
- exp_inv['VCPU']['allocation_ratio'] = 12.3
- exp_inv['VCPU']['reserved'] = 4
- exp_inv['MEMORY_MB']['allocation_ratio'] = 4.5
- exp_inv['MEMORY_MB']['reserved'] = 32
- exp_inv['DISK_GB']['allocation_ratio'] = 6.7
- exp_inv['DISK_GB']['reserved'] = 6
-
- def test_update_provider_tree_complex_ptree(self):
- # Overrides inventory already on the provider; leaves other providers
- # and aggregates/traits alone.
- with self._update_provider_tree() as (ptree, exp_inv):
- ptree.update_inventory('compute_host', {
- # these should get blown away
- 'VCPU': {
- 'total': 16,
- 'max_unit': 2,
- 'allocation_ratio': 1.0,
- 'reserved': 10,
- },
- 'CUSTOM_BOGUS': {
- 'total': 1234,
- }
- })
- ptree.update_aggregates('compute_host',
- [uuids.ss_agg, uuids.other_agg])
- ptree.update_traits('compute_host', ['CUSTOM_FOO', 'CUSTOM_BAR'])
- ptree.new_root('ssp', uuids.ssp)
- ptree.update_inventory('ssp', {'sentinel': 'inventory',
- 'for': 'ssp'})
- ptree.update_aggregates('ssp', [uuids.ss_agg])
- ptree.new_child('sriov', 'compute_host', uuid=uuids.sriov)
- # Since CONF.cpu_allocation_ratio is not set and this is not the
- # initial upt call (where CONF.initial_cpu_allocation_ratio would
- # apply), the existing allocation ratio value already in the tree
- # is reused.
- exp_inv['VCPU']['allocation_ratio'] = 1.0
-
- # Make sure the compute's agg and traits were left alone
- cndata = ptree.data('compute_host')
- self.assertEqual(set([uuids.ss_agg, uuids.other_agg]),
- cndata.aggregates)
- self.assertEqual(set(['CUSTOM_FOO', 'CUSTOM_BAR']), cndata.traits)
- # And the other providers were left alone
- self.assertEqual(set([uuids.cn, uuids.ssp, uuids.sriov]),
- set(ptree.get_provider_uuids()))
- # ...including the ssp's aggregates
- self.assertEqual(set([uuids.ss_agg]), ptree.data('ssp').aggregates)
-
- @mock.patch('nova.virt.powervm.tasks.storage.AttachVolume.execute')
- @mock.patch('nova.virt.powervm.tasks.network.PlugMgmtVif.execute')
- @mock.patch('nova.virt.powervm.tasks.network.PlugVifs.execute')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
- @mock.patch('nova.virt.configdrive.required_by')
- @mock.patch('nova.virt.powervm.vm.create_lpar')
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
- autospec=True)
- def test_spawn_ops(self, mock_scrub, mock_bldftsk, mock_crt_lpar,
- mock_cdrb, mock_cfg_drv, mock_plug_vifs,
- mock_plug_mgmt_vif, mock_attach_vol):
- """Validates the 'typical' spawn flow of the spawn of an instance. """
- mock_cdrb.return_value = True
- self.drv.host_wrapper = mock.Mock()
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
- mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
- mock_bldftsk.return_value = mock_ftsk
- block_device_info = self._fake_bdms()
- self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
- 'allocs', network_info='netinfo',
- block_device_info=block_device_info)
- mock_crt_lpar.assert_called_once_with(
- self.adp, self.drv.host_wrapper, self.inst)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
- self.assertTrue(mock_plug_vifs.called)
- self.assertTrue(mock_plug_mgmt_vif.called)
- mock_scrub.assert_called_once_with(
- [mock_crt_lpar.return_value.id], mock_ftsk, lpars_exist=True)
- self.drv.disk_dvr.create_disk_from_image.assert_called_once_with(
- 'context', self.inst, 'img_meta')
- self.drv.disk_dvr.attach_disk.assert_called_once_with(
- self.inst, self.drv.disk_dvr.create_disk_from_image.return_value,
- mock_ftsk)
- self.assertEqual(2, mock_attach_vol.call_count)
- mock_cfg_drv.assert_called_once_with(self.adp)
- mock_cfg_drv.return_value.create_cfg_drv_vopt.assert_called_once_with(
- self.inst, 'files', 'netinfo', mock_ftsk, admin_pass='password',
- mgmt_cna=mock.ANY)
- self.pwron.assert_called_once_with(self.adp, self.inst)
-
- mock_cfg_drv.reset_mock()
- mock_attach_vol.reset_mock()
-
- # No config drive, no bdms
- mock_cdrb.return_value = False
- self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
- 'allocs')
- mock_cfg_drv.assert_not_called()
- mock_attach_vol.assert_not_called()
-
- @mock.patch('nova.virt.powervm.tasks.storage.DetachVolume.execute')
- @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs.execute')
- @mock.patch('nova.virt.powervm.vm.delete_lpar')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
- @mock.patch('nova.virt.configdrive.required_by')
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- def test_destroy(self, mock_bldftsk, mock_cdrb, mock_cfgdrv,
- mock_dlt_lpar, mock_unplug, mock_detach_vol):
- """Validates PowerVM destroy."""
- self.drv.host_wrapper = mock.Mock()
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
-
- mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
- mock_bldftsk.return_value = mock_ftsk
- block_device_info = self._fake_bdms()
-
- # Good path, with config drive, destroy disks
- mock_cdrb.return_value = True
- self.drv.destroy('context', self.inst, [],
- block_device_info=block_device_info)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag=[pvm_const.XAG.VIO_SMAP])
- mock_unplug.assert_called_once()
- mock_cdrb.assert_called_once_with(self.inst)
- mock_cfgdrv.assert_called_once_with(self.adp)
- mock_cfgdrv.return_value.dlt_vopt.assert_called_once_with(
- self.inst, stg_ftsk=mock_bldftsk.return_value)
- self.assertEqual(2, mock_detach_vol.call_count)
- self.drv.disk_dvr.detach_disk.assert_called_once_with(
- self.inst)
- self.drv.disk_dvr.delete_disks.assert_called_once_with(
- self.drv.disk_dvr.detach_disk.return_value)
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- self.pwroff.reset_mock()
- mock_bldftsk.reset_mock()
- mock_unplug.reset_mock()
- mock_cdrb.reset_mock()
- mock_cfgdrv.reset_mock()
- self.drv.disk_dvr.detach_disk.reset_mock()
- self.drv.disk_dvr.delete_disks.reset_mock()
- mock_detach_vol.reset_mock()
- mock_dlt_lpar.reset_mock()
-
- # No config drive, preserve disks, no block device info
- mock_cdrb.return_value = False
- self.drv.destroy('context', self.inst, [], block_device_info={},
- destroy_disks=False)
- mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
- mock_detach_vol.assert_not_called()
- self.drv.disk_dvr.delete_disks.assert_not_called()
-
- # Non-forced power_off, since preserving disks
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag=[pvm_const.XAG.VIO_SMAP])
- mock_unplug.assert_called_once()
- mock_cdrb.assert_called_once_with(self.inst)
- mock_cfgdrv.assert_not_called()
- mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
- self.drv.disk_dvr.detach_disk.assert_called_once_with(
- self.inst)
- self.drv.disk_dvr.delete_disks.assert_not_called()
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- self.pwroff.reset_mock()
- mock_bldftsk.reset_mock()
- mock_unplug.reset_mock()
- mock_cdrb.reset_mock()
- mock_cfgdrv.reset_mock()
- self.drv.disk_dvr.detach_disk.reset_mock()
- self.drv.disk_dvr.delete_disks.reset_mock()
- mock_dlt_lpar.reset_mock()
-
- # InstanceNotFound exception, non-forced
- self.pwroff.side_effect = exception.InstanceNotFound(
- instance_id='something')
- self.drv.destroy('context', self.inst, [], block_device_info={},
- destroy_disks=False)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False)
- self.drv.disk_dvr.detach_disk.assert_not_called()
- mock_unplug.assert_not_called()
- self.drv.disk_dvr.delete_disks.assert_not_called()
- mock_dlt_lpar.assert_not_called()
-
- self.pwroff.reset_mock()
- self.pwroff.side_effect = None
- mock_unplug.reset_mock()
-
- # Convertible (PowerVM) exception
- mock_dlt_lpar.side_effect = pvm_exc.TimeoutError("Timed out")
- self.assertRaises(exception.InstanceTerminationFailure,
- self.drv.destroy, 'context', self.inst, [],
- block_device_info={})
-
- # Everything got called
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True)
- mock_unplug.assert_called_once()
- self.drv.disk_dvr.detach_disk.assert_called_once_with(self.inst)
- self.drv.disk_dvr.delete_disks.assert_called_once_with(
- self.drv.disk_dvr.detach_disk.return_value)
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- # Other random exception raises directly
- mock_dlt_lpar.side_effect = ValueError()
- self.assertRaises(ValueError,
- self.drv.destroy, 'context', self.inst, [],
- block_device_info={})
-
- @mock.patch('nova.virt.powervm.tasks.image.UpdateTaskState.'
- 'execute', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.storage.InstanceDiskToMgmt.'
- 'execute', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.image.StreamToGlance.execute')
- @mock.patch('nova.virt.powervm.tasks.storage.RemoveInstanceDiskFromMgmt.'
- 'execute')
- def test_snapshot(self, mock_rm, mock_stream, mock_conn, mock_update):
- self.drv.disk_dvr = mock.Mock()
- self.drv.image_api = mock.Mock()
- mock_conn.return_value = 'stg_elem', 'vios_wrap', 'disk_path'
- self.drv.snapshot('context', self.inst, 'image_id',
- 'update_task_state')
- self.assertEqual(2, mock_update.call_count)
- self.assertEqual(1, mock_conn.call_count)
- mock_stream.assert_called_once_with(disk_path='disk_path')
- mock_rm.assert_called_once_with(
- stg_elem='stg_elem', vios_wrap='vios_wrap', disk_path='disk_path')
-
- self.drv.disk_dvr.capabilities = {'snapshot': False}
- self.assertRaises(exception.NotSupportedWithOption, self.drv.snapshot,
- 'context', self.inst, 'image_id', 'update_task_state')
-
- def test_power_on(self):
- self.drv.power_on('context', self.inst, 'network_info')
- self.pwron.assert_called_once_with(self.adp, self.inst)
-
- def test_power_off(self):
- self.drv.power_off(self.inst)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True, timeout=None)
-
- def test_power_off_timeout(self):
- # Long timeout (retry interval means nothing on powervm)
- self.drv.power_off(self.inst, timeout=500, retry_interval=10)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False, timeout=500)
-
- @mock.patch('nova.virt.powervm.vm.reboot')
- def test_reboot_soft(self, mock_reboot):
- inst = mock.Mock()
- self.drv.reboot('context', inst, 'network_info', 'SOFT')
- mock_reboot.assert_called_once_with(self.adp, inst, False)
-
- @mock.patch('nova.virt.powervm.vm.reboot')
- def test_reboot_hard(self, mock_reboot):
- inst = mock.Mock()
- self.drv.reboot('context', inst, 'network_info', 'HARD')
- mock_reboot.assert_called_once_with(self.adp, inst, True)
-
- @mock.patch('nova.virt.powervm.driver.PowerVMDriver.plug_vifs')
- def test_attach_interface(self, mock_plug_vifs):
- self.drv.attach_interface('context', 'inst', 'image_meta', 'vif')
- mock_plug_vifs.assert_called_once_with('inst', ['vif'])
-
- @mock.patch('nova.virt.powervm.driver.PowerVMDriver.unplug_vifs')
- def test_detach_interface(self, mock_unplug_vifs):
- self.drv.detach_interface('context', 'inst', 'vif')
- mock_unplug_vifs.assert_called_once_with('inst', ['vif'])
-
- @mock.patch('nova.virt.powervm.tasks.vm.Get', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.base.run', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.network.PlugVifs', autospec=True)
- @mock.patch('taskflow.patterns.linear_flow.Flow', autospec=True)
- def test_plug_vifs(self, mock_tf, mock_plug_vifs, mock_tf_run, mock_get):
- # Successful plug
- mock_inst = mock.Mock()
- self.drv.plug_vifs(mock_inst, 'net_info')
- mock_get.assert_called_once_with(self.adp, mock_inst)
- mock_plug_vifs.assert_called_once_with(
- self.drv.virtapi, self.adp, mock_inst, 'net_info')
- add_calls = [mock.call(mock_get.return_value),
- mock.call(mock_plug_vifs.return_value)]
- mock_tf.return_value.add.assert_has_calls(add_calls)
- mock_tf_run.assert_called_once_with(
- mock_tf.return_value, instance=mock_inst)
-
- # InstanceNotFound and generic exception both raise
- mock_tf_run.side_effect = exception.InstanceNotFound('id')
- exc = self.assertRaises(exception.VirtualInterfacePlugException,
- self.drv.plug_vifs, mock_inst, 'net_info')
- self.assertIn('instance', str(exc))
- mock_tf_run.side_effect = Exception
- exc = self.assertRaises(exception.VirtualInterfacePlugException,
- self.drv.plug_vifs, mock_inst, 'net_info')
- self.assertIn('unexpected', str(exc))
-
- @mock.patch('nova.virt.powervm.tasks.base.run', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs', autospec=True)
- @mock.patch('taskflow.patterns.linear_flow.Flow', autospec=True)
- def test_unplug_vifs(self, mock_tf, mock_unplug_vifs, mock_tf_run):
- # Successful unplug
- mock_inst = mock.Mock()
- self.drv.unplug_vifs(mock_inst, 'net_info')
- mock_unplug_vifs.assert_called_once_with(self.adp, mock_inst,
- 'net_info')
- mock_tf.return_value.add.assert_called_once_with(
- mock_unplug_vifs.return_value)
- mock_tf_run.assert_called_once_with(mock_tf.return_value,
- instance=mock_inst)
-
- # InstanceNotFound should pass
- mock_tf_run.side_effect = exception.InstanceNotFound(instance_id='1')
- self.drv.unplug_vifs(mock_inst, 'net_info')
-
- # Raise InterfaceDetachFailed otherwise
- mock_tf_run.side_effect = Exception
- self.assertRaises(exception.InterfaceDetachFailed,
- self.drv.unplug_vifs, mock_inst, 'net_info')
-
- @mock.patch('pypowervm.tasks.vterm.open_remotable_vnc_vterm',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid',
- new=mock.Mock(return_value='uuid'))
- def test_get_vnc_console(self, mock_vterm):
- # Success
- mock_vterm.return_value = '10'
- resp = self.drv.get_vnc_console(mock.ANY, self.inst)
- self.assertEqual('127.0.0.1', resp.host)
- self.assertEqual('10', resp.port)
- self.assertEqual('uuid', resp.internal_access_path)
- mock_vterm.assert_called_once_with(
- mock.ANY, 'uuid', mock.ANY, vnc_path='uuid')
-
- # VNC failure - exception is raised directly
- mock_vterm.side_effect = pvm_exc.VNCBasedTerminalFailedToOpen(err='xx')
- self.assertRaises(pvm_exc.VNCBasedTerminalFailedToOpen,
- self.drv.get_vnc_console, mock.ANY, self.inst)
-
- # 404
- mock_vterm.side_effect = pvm_exc.HttpError(mock.Mock(status=404))
- self.assertRaises(exception.InstanceNotFound, self.drv.get_vnc_console,
- mock.ANY, self.inst)
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_attach_volume(self, mock_vscsi_adpt):
- """Validates the basic PowerVM attach volume."""
- # BDMs
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
-
- with mock.patch.object(self.inst, 'save') as mock_save:
- # Invoke the method.
- self.drv.attach_volume('context', mock_bdm.get('connection_info'),
- self.inst, mock.sentinel.stg_ftsk)
-
- # Verify the connect volume was invoked
- mock_vscsi_adpt.return_value.attach_volume.assert_called_once_with()
- mock_save.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_detach_volume(self, mock_vscsi_adpt):
- """Validates the basic PowerVM detach volume."""
- # BDMs
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
-
- # Invoke the method, good path test.
- self.drv.detach_volume('context', mock_bdm.get('connection_info'),
- self.inst, mock.sentinel.stg_ftsk)
- # Verify the disconnect volume was invoked
- mock_vscsi_adpt.return_value.detach_volume.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_extend_volume(self, mock_vscsi_adpt):
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
- self.drv.extend_volume(
- 'context', mock_bdm.get('connection_info'), self.inst, 0)
- mock_vscsi_adpt.return_value.extend_volume.assert_called_once_with()
-
- def test_vol_drv_iter(self):
- block_device_info = self._fake_bdms()
- bdms = nova_driver.block_device_info_get_mapping(block_device_info)
- vol_adpt = mock.Mock()
-
- def _get_results(bdms):
- # Patch so we get the same mock back each time.
- with mock.patch('nova.virt.powervm.volume.fcvscsi.'
- 'FCVscsiVolumeAdapter', return_value=vol_adpt):
- return [
- (bdm, vol_drv) for bdm, vol_drv in self.drv._vol_drv_iter(
- 'context', self.inst, bdms)]
-
- results = _get_results(bdms)
- self.assertEqual(
- 'fake_vol1',
- results[0][0]['connection_info']['data']['volume_id'])
- self.assertEqual(vol_adpt, results[0][1])
- self.assertEqual(
- 'fake_vol2',
- results[1][0]['connection_info']['data']['volume_id'])
- self.assertEqual(vol_adpt, results[1][1])
-
- # Test with empty bdms
- self.assertEqual([], _get_results([]))
-
- @staticmethod
- def _fake_bdms():
- def _fake_bdm(volume_id, target_lun):
- connection_info = {'driver_volume_type': 'fibre_channel',
- 'data': {'volume_id': volume_id,
- 'target_lun': target_lun,
- 'initiator_target_map':
- {'21000024F5': ['50050768']}}}
- mapping_dict = {'source_type': 'volume', 'volume_id': volume_id,
- 'destination_type': 'volume',
- 'connection_info':
- jsonutils.dumps(connection_info),
- }
- bdm_dict = nova_block_device.BlockDeviceDict(mapping_dict)
- bdm_obj = bdmobj.BlockDeviceMapping(**bdm_dict)
-
- return nova_virt_bdm.DriverVolumeBlockDevice(bdm_obj)
-
- bdm_list = [_fake_bdm('fake_vol1', 0), _fake_bdm('fake_vol2', 1)]
- block_device_info = {'block_device_mapping': bdm_list}
-
- return block_device_info
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.wwpns', autospec=True)
- def test_get_volume_connector(self, mock_wwpns):
- vol_connector = self.drv.get_volume_connector(mock.Mock())
- self.assertEqual(mock_wwpns.return_value, vol_connector['wwpns'])
- self.assertFalse(vol_connector['multipath'])
- self.assertEqual(vol_connector['host'], CONF.host)
- self.assertIsNone(vol_connector['initiator'])
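The four connector assertions above pin down the dict shape this driver handed to Cinder for Fibre Channel attachments. A minimal sketch of that shape, with illustrative names rather than the deleted driver's literal code:

    def build_volume_connector(wwpns, host):
        # Cinder-style connector for an FC-only host: WWPNs from the
        # platform, no multipath support, the configured hostname, and no
        # iSCSI initiator, matching the four assertions above.
        return {
            'wwpns': wwpns,
            'multipath': False,
            'host': host,
            'initiator': None,
        }

    print(build_volume_connector(['21000024FF649104'], 'the_hostname'))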
diff --git a/nova/tests/unit/virt/powervm/test_host.py b/nova/tests/unit/virt/powervm/test_host.py
deleted file mode 100644
index 625e1f9c70..0000000000
--- a/nova/tests/unit/virt/powervm/test_host.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2016 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import mock
-from pypowervm.wrappers import managed_system as pvm_ms
-
-from nova import test
-from nova.virt.powervm import host as pvm_host
-
-
-class TestPowerVMHost(test.NoDBTestCase):
- def test_host_resources(self):
- # Create objects to test with
- ms_wrapper = mock.create_autospec(pvm_ms.System, spec_set=True)
- asio = mock.create_autospec(pvm_ms.ASIOConfig, spec_set=True)
- ms_wrapper.configure_mock(
- proc_units_configurable=500,
- proc_units_avail=500,
- memory_configurable=5242880,
- memory_free=5242752,
- memory_region_size='big',
- asio_config=asio)
- self.flags(host='the_hostname')
-
- # Run the actual test
- stats = pvm_host.build_host_resource_from_ms(ms_wrapper)
- self.assertIsNotNone(stats)
-
- # Check for the presence of fields
- fields = (('vcpus', 500), ('vcpus_used', 0),
- ('memory_mb', 5242880), ('memory_mb_used', 128),
- 'hypervisor_type', 'hypervisor_version',
- ('hypervisor_hostname', 'the_hostname'), 'cpu_info',
- 'supported_instances', 'stats')
- for fld in fields:
- if isinstance(fld, tuple):
- value = stats.get(fld[0], None)
- self.assertEqual(value, fld[1])
- else:
- value = stats.get(fld, None)
- self.assertIsNotNone(value)
- # Check for individual stats
- hstats = (('proc_units', '500.00'), ('proc_units_used', '0.00'))
- for stat in hstats:
- if isinstance(stat, tuple):
- value = stats['stats'].get(stat[0], None)
- self.assertEqual(value, stat[1])
- else:
- value = stats['stats'].get(stat, None)
- self.assertIsNotNone(value)
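The field checks in test_host_resources use a mixed sequence: tuple entries assert an exact value, bare strings assert mere presence. That pattern lifts easily into a helper; a hypothetical sketch, not part of the deleted module:

    def assert_fields(testcase, stats, fields):
        # Tuple entries assert an exact (key, value) pair; bare string
        # entries assert only that the key is present and non-None.
        for fld in fields:
            if isinstance(fld, tuple):
                testcase.assertEqual(stats.get(fld[0]), fld[1])
            else:
                testcase.assertIsNotNone(stats.get(fld))

Called as assert_fields(self, stats, (('vcpus', 500), 'cpu_info')), it would replace both validation loops above.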
diff --git a/nova/tests/unit/virt/powervm/test_image.py b/nova/tests/unit/virt/powervm/test_image.py
deleted file mode 100644
index 2db33e6a0f..0000000000
--- a/nova/tests/unit/virt/powervm/test_image.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-from nova.virt.powervm import image
-
-
-class TestImage(test.TestCase):
-
- @mock.patch('nova.utils.temporary_chown', autospec=True)
- @mock.patch('nova.image.glance.API', autospec=True)
- def test_stream_blockdev_to_glance(self, mock_api, mock_chown):
- mock_open = mock.mock_open()
- with mock.patch('builtins.open', new=mock_open):
- image.stream_blockdev_to_glance('context', mock_api, 'image_id',
- 'metadata', '/dev/disk')
- mock_chown.assert_called_with('/dev/disk')
- mock_open.assert_called_with('/dev/disk', 'rb')
- mock_api.update.assert_called_with('context', 'image_id', 'metadata',
- mock_open.return_value)
-
- @mock.patch('nova.image.glance.API', autospec=True)
- def test_generate_snapshot_metadata(self, mock_api):
- mock_api.get.return_value = {'name': 'image_name'}
- mock_instance = mock.Mock()
- mock_instance.project_id = 'project_id'
- ret = image.generate_snapshot_metadata('context', mock_api, 'image_id',
- mock_instance)
- mock_api.get.assert_called_with('context', 'image_id')
- self.assertEqual({
- 'name': 'image_name',
- 'status': 'active',
- 'disk_format': 'raw',
- 'container_format': 'bare',
- 'properties': {
- 'image_location': 'snapshot',
- 'image_state': 'available',
- 'owner_id': 'project_id',
- }
- }, ret)
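test_stream_blockdev_to_glance leans on a mock.mock_open subtlety: the handle returned by the context manager is the same object as mock_open.return_value, which is why the update assertion can reference it directly. A runnable sketch of the pattern, where stream_device is a stand-in for the deleted helper:

    from unittest import mock

    def stream_device(api, context, image_id, metadata, dev_path):
        # Open the block device read-only and hand the file object to the
        # image API, as the deleted helper did (minus the privsep chown).
        with open(dev_path, 'rb') as dev:
            api.update(context, image_id, metadata, dev)

    api = mock.Mock()
    with mock.patch('builtins.open', mock.mock_open()) as m_open:
        stream_device(api, 'ctx', 'img', {}, '/dev/disk')
    m_open.assert_called_once_with('/dev/disk', 'rb')
    # mock_open's __enter__ returns the same handle as m_open(...):
    api.update.assert_called_once_with('ctx', 'img', {}, m_open.return_value)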
diff --git a/nova/tests/unit/virt/powervm/test_media.py b/nova/tests/unit/virt/powervm/test_media.py
deleted file mode 100644
index f98769e0de..0000000000
--- a/nova/tests/unit/virt/powervm/test_media.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils.fixture import uuidsentinel
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import network as pvm_net
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import test
-from nova.virt.powervm import media as m
-
-
-class TestConfigDrivePowerVM(test.NoDBTestCase):
- """Unit Tests for the ConfigDrivePowerVM class."""
-
- def setUp(self):
- super(TestConfigDrivePowerVM, self).setUp()
-
- self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
- self.validate_vopt = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.vopt.validate_vopt_repo_exists',
- autospec=True)).mock
- self.validate_vopt.return_value = 'vios_uuid', 'vg_uuid'
-
- @mock.patch('nova.api.metadata.base.InstanceMetadata')
- @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
- def test_crt_cfg_dr_iso(self, mock_mkdrv, mock_meta):
- """Validates that the image creation method works."""
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
- self.assertTrue(self.validate_vopt.called)
- mock_instance = mock.MagicMock()
- mock_instance.uuid = uuidsentinel.inst_id
- mock_files = mock.MagicMock()
- mock_net = mock.MagicMock()
- iso_path = '/tmp/cfgdrv.iso'
- cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
- iso_path)
- self.assertEqual(mock_mkdrv.call_count, 1)
-
- # Test retry iso create
- mock_mkdrv.reset_mock()
- mock_mkdrv.side_effect = [OSError, mock_mkdrv]
- cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
- iso_path)
- self.assertEqual(mock_mkdrv.call_count, 2)
-
- @mock.patch('tempfile.NamedTemporaryFile')
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
- @mock.patch('os.path.getsize')
- @mock.patch('pypowervm.tasks.storage.upload_vopt')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM.'
- '_create_cfg_dr_iso')
- def test_create_cfg_drv_vopt(self, mock_ccdi, mock_upl, mock_getsize,
- mock_addmap, mock_bldmap, mock_vm_id,
- mock_ntf):
- cfg_dr = m.ConfigDrivePowerVM(self.apt)
- mock_instance = mock.MagicMock()
- mock_instance.uuid = uuidsentinel.inst_id
- mock_upl.return_value = 'vopt', 'f_uuid'
- fh = mock_ntf.return_value.__enter__.return_value
- fh.name = 'iso_path'
- wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
-
- def test_afs(add_func):
- # Validate the internal add_func
- vio = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_addmap.return_value, add_func(vio))
- mock_vm_id.assert_called_once_with(mock_instance)
- mock_bldmap.assert_called_once_with(
- None, vio, mock_vm_id.return_value, 'vopt')
- mock_addmap.assert_called_once_with(vio, mock_bldmap.return_value)
- wtsk.add_functor_subtask.side_effect = test_afs
-
- # calculate expected file name
- expected_file_name = 'cfg_' + mock_instance.uuid.replace('-', '')
- allowed_len = pvm_const.MaxLen.VOPT_NAME - 4 # '.iso' is 4 chars
- expected_file_name = expected_file_name[:allowed_len] + '.iso'
-
- cfg_dr.create_cfg_drv_vopt(
- mock_instance, 'files', 'netinfo', ftsk, admin_pass='pass')
-
- mock_ntf.assert_called_once_with(mode='rb')
- mock_ccdi.assert_called_once_with(mock_instance, 'files', 'netinfo',
- 'iso_path', admin_pass='pass')
- mock_getsize.assert_called_once_with('iso_path')
- mock_upl.assert_called_once_with(self.apt, 'vios_uuid', fh,
- expected_file_name,
- mock_getsize.return_value)
- wtsk.add_functor_subtask.assert_called_once()
-
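The expected-file-name arithmetic above encodes a real constraint: virtual optical media names are length-limited, so the name is truncated in a way that keeps the '.iso' suffix intact. As a tiny parameterized sketch, with max_len standing in for pvm_const.MaxLen.VOPT_NAME:

    def vopt_file_name(instance_uuid, max_len):
        # 'cfg_' plus the dashless instance UUID, truncated so the
        # 4-character '.iso' suffix still fits under max_len.
        name = 'cfg_' + instance_uuid.replace('-', '')
        return name[:max_len - 4] + '.iso'

    assert len(vopt_file_name('0810ea82-1111-2222-3333-444455556666', 37)) == 37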
- def test_sanitize_network_info(self):
- network_info = [{'type': 'lbr'}, {'type': 'pvm_sea'},
- {'type': 'ovs'}]
-
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
-
- resp = cfg_dr_builder._sanitize_network_info(network_info)
- expected_ret = [{'type': 'vif'}, {'type': 'vif'},
- {'type': 'ovs'}]
- self.assertEqual(resp, expected_ret)
-
- @mock.patch('pypowervm.wrappers.storage.VG', autospec=True)
- @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
- @mock.patch('taskflow.task.FunctorTask', autospec=True)
- def test_dlt_vopt(self, mock_functask, mock_vios, mock_find_maps, mock_gmf,
- mock_uuid, mock_rmstg, mock_vg):
- cfg_dr = m.ConfigDrivePowerVM(self.apt)
- wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
-
- # Test with no media to remove
- mock_find_maps.return_value = []
- cfg_dr.dlt_vopt('inst', ftsk)
- mock_uuid.assert_called_once_with('inst')
- mock_gmf.assert_called_once_with(pvm_stg.VOptMedia)
- wtsk.add_functor_subtask.assert_called_once_with(
- tsk_map.remove_maps, mock_uuid.return_value,
- match_func=mock_gmf.return_value)
- ftsk.get_wrapper.assert_called_once_with('vios_uuid')
- mock_find_maps.assert_called_once_with(
- ftsk.get_wrapper.return_value.scsi_mappings,
- client_lpar_id=mock_uuid.return_value,
- match_func=mock_gmf.return_value)
- mock_functask.assert_not_called()
-
- # Test with media to remove
- mock_find_maps.return_value = [mock.Mock(backing_storage=media)
- for media in ['m1', 'm2']]
-
- def test_functor_task(rm_vopt):
- # Validate internal rm_vopt function
- rm_vopt()
- mock_vg.get.assert_called_once_with(
- self.apt, uuid='vg_uuid', parent_type=pvm_vios.VIOS,
- parent_uuid='vios_uuid')
- mock_rmstg.assert_called_once_with(
- mock_vg.get.return_value, vopts=['m1', 'm2'])
- return 'functor_task'
- mock_functask.side_effect = test_functor_task
-
- cfg_dr.dlt_vopt('inst', ftsk)
- mock_functask.assert_called_once()
- ftsk.add_post_execute.assert_called_once_with('functor_task')
-
- def test_mgmt_cna_to_vif(self):
- mock_cna = mock.Mock(spec=pvm_net.CNA, mac="FAD4433ED120")
-
- # Run
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
- vif = cfg_dr_builder._mgmt_cna_to_vif(mock_cna)
-
- # Validate
- self.assertEqual(vif.get('address'), "fa:d4:43:3e:d1:20")
- self.assertEqual(vif.get('id'), 'mgmt_vif')
- self.assertIsNotNone(vif.get('network'))
- self.assertEqual(1, len(vif.get('network').get('subnets')))
- subnet = vif.get('network').get('subnets')[0]
- self.assertEqual(6, subnet.get('version'))
- self.assertEqual('fe80::/64', subnet.get('cidr'))
- ip = subnet.get('ips')[0]
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120', ip.get('address'))
-
- def test_mac_to_link_local(self):
- mac = 'fa:d4:43:3e:d1:20'
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
-
- mac = '00:00:00:00:00:00'
- self.assertEqual('fe80::0200:00ff:fe00:0000',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
-
- mac = 'ff:ff:ff:ff:ff:ff'
- self.assertEqual('fe80::fdff:ffff:feff:ffff',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
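The _mac_to_link_local expectations follow the modified EUI-64 rule: flip the universal/local bit of the first octet, splice ff:fe into the middle, and scope the result under fe80::/64. Note the tests keep leading zeros instead of RFC 5952 compression. A sketch (not the deleted implementation) that reproduces the expected values:

    def mac_to_link_local(mac):
        # Modified EUI-64 interface identifier from a unicast MAC address.
        octets = [int(b, 16) for b in mac.split(':')]
        octets[0] ^= 0x02  # flip the universal/local bit
        eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
        groups = ['%02x%02x' % (eui64[i], eui64[i + 1])
                  for i in range(0, 8, 2)]
        return 'fe80::' + ':'.join(groups)

    assert mac_to_link_local('fa:d4:43:3e:d1:20') == 'fe80::f8d4:43ff:fe3e:d120'
    assert mac_to_link_local('00:00:00:00:00:00') == 'fe80::0200:00ff:fe00:0000'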
diff --git a/nova/tests/unit/virt/powervm/test_mgmt.py b/nova/tests/unit/virt/powervm/test_mgmt.py
deleted file mode 100644
index 5c0098ceeb..0000000000
--- a/nova/tests/unit/virt/powervm/test_mgmt.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import retrying
-
-from nova import exception
-from nova import test
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.tests.test_utils import pvmhttp
-
-from nova.virt.powervm import mgmt
-
-LPAR_HTTPRESP_FILE = "lpar.txt"
-
-
-class TestMgmt(test.TestCase):
- def setUp(self):
- super(TestMgmt, self).setUp()
- self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
- lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
- self.assertIsNotNone(
- lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)
-
- self.resp = lpar_http.response
-
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- def test_mgmt_uuid(self, mock_get_partition):
- mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
- adpt = mock.Mock()
-
- # First run should call the partition only once
- self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
- mock_get_partition.assert_called_once_with(adpt)
-
- # But a subsequent call should effectively no-op
- mock_get_partition.reset_mock()
- self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
- self.assertEqual(mock_get_partition.call_count, 0)
-
- @mock.patch('glob.glob', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- @mock.patch('os.path.realpath', autospec=True)
- def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
- mock_glob):
- scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
- udid = ('275b5d5f88fa5611e48be9000098be9400'
- '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
- devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
- mapping = mock.Mock()
- mapping.client_adapter.lpar_slot_num = 5
- mapping.backing_storage.udid = udid
-        # Realistically, the first glob would return e.g. .../host0/.../host0/...,
-        # but it doesn't matter for test purposes.
- mock_glob.side_effect = [[scanpath], [devlink]]
- mgmt.discover_vscsi_disk(mapping)
- mock_glob.assert_has_calls(
- [mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
- mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
- mock_realpath.assert_called_with(devlink)
-
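test_discover_vscsi_disk drives a standard Linux SCSI hot-add flow: write the wildcard '- - -' to the adapter's sysfs scan file, then resolve the persistent /dev/disk/by-id symlink (matched on the tail of the device UDID) to the real block device. A hedged sketch of that flow; the sysfs glob is passed in because the real path derivation, and the privileged write, lived in the deleted module:

    import glob
    import os

    def discover_disk(scan_glob, udid_suffix):
        # Trigger a full rescan on every matching SCSI host, then look up
        # the stable by-id link whose name ends with the UDID tail.
        for scanpath in glob.glob(scan_glob):
            with open(scanpath, 'a') as f:  # real code wrote via privsep
                f.write('- - -')
        links = glob.glob('/dev/disk/by-id/*' + udid_suffix)
        return os.path.realpath(links[0]) if links else None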
- @mock.patch('retrying.retry', autospec=True)
- @mock.patch('glob.glob', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
- mock_glob, mock_retry):
- """Zero or more than one disk is found by discover_vscsi_disk."""
- def validate_retry(kwargs):
- self.assertIn('retry_on_result', kwargs)
- self.assertEqual(250, kwargs['wait_fixed'])
- self.assertEqual(300000, kwargs['stop_max_delay'])
-
- def raiser(unused):
- raise retrying.RetryError(mock.Mock(attempt_number=123))
-
- def retry_passthrough(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_dev):
- return _poll_for_dev
- return wrapped
-
- def retry_timeout(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_dev):
- return raiser
- return wrapped
-
- udid = ('275b5d5f88fa5611e48be9000098be9400'
- '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
- mapping = mock.Mock()
- mapping.client_adapter.lpar_slot_num = 5
- mapping.backing_storage.udid = udid
- # No disks found
- mock_retry.side_effect = retry_timeout
- mock_glob.side_effect = lambda path: []
- self.assertRaises(exception.NoDiskDiscoveryException,
- mgmt.discover_vscsi_disk, mapping)
- # Multiple disks found
- mock_retry.side_effect = retry_passthrough
- mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
- self.assertRaises(exception.UniqueDiskDiscoveryException,
- mgmt.discover_vscsi_disk, mapping)
-
- @mock.patch('time.sleep', autospec=True)
- @mock.patch('os.path.realpath', autospec=True)
- @mock.patch('os.stat', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
- mock_sleep):
- link = '/dev/link/foo'
- realpath = '/dev/sde'
- delpath = '/sys/block/sde/device/delete'
- mock_realpath.return_value = realpath
-
- # Good path
- mock_stat.side_effect = (None, None, OSError())
- mgmt.remove_block_dev(link)
- mock_realpath.assert_called_with(link)
- mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
- mock.call(realpath)])
- mock_writefile.assert_called_once_with(delpath, 'a', '1')
- self.assertEqual(0, mock_sleep.call_count)
-
- # Device param not found
- mock_writefile.reset_mock()
- mock_stat.reset_mock()
- mock_stat.side_effect = (OSError(), None, None)
- self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
- link)
- # stat was called once; exec was not called
- self.assertEqual(1, mock_stat.call_count)
- self.assertEqual(0, mock_writefile.call_count)
-
- # Delete special file not found
- mock_writefile.reset_mock()
- mock_stat.reset_mock()
- mock_stat.side_effect = (None, OSError(), None)
- self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
- link)
- # stat was called twice; exec was not called
- self.assertEqual(2, mock_stat.call_count)
- self.assertEqual(0, mock_writefile.call_count)
-
- @mock.patch('retrying.retry')
- @mock.patch('os.path.realpath')
- @mock.patch('os.stat')
- @mock.patch('nova.privsep.path.writefile')
- def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
- mock_realpath, mock_retry):
-
- def validate_retry(kwargs):
- self.assertIn('retry_on_result', kwargs)
- self.assertEqual(250, kwargs['wait_fixed'])
- self.assertEqual(10000, kwargs['stop_max_delay'])
-
- def raiser(unused):
- raise retrying.RetryError(mock.Mock(attempt_number=123))
-
- def retry_timeout(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_del):
- return raiser
- return wrapped
-
- # Deletion was attempted, but device is still there
- link = '/dev/link/foo'
- delpath = '/sys/block/sde/device/delete'
- realpath = '/dev/sde'
- mock_realpath.return_value = realpath
- mock_stat.side_effect = lambda path: 1
- mock_retry.side_effect = retry_timeout
-
- self.assertRaises(
- exception.DeviceDeletionException, mgmt.remove_block_dev, link)
- mock_realpath.assert_called_once_with(link)
- mock_dacw.assert_called_with(delpath, 'a', '1')
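Both timeout tests validate the retrying keyword set instead of actually sleeping: retry_on_result polls while the result is falsy, wait_fixed=250 is the millisecond gap between attempts, and stop_max_delay caps the total budget. For reference, the decorator is typically applied like this (poll_for_deletion is illustrative):

    import retrying

    @retrying.retry(retry_on_result=lambda result: not result,
                    wait_fixed=250, stop_max_delay=10000)
    def poll_for_deletion(path_gone):
        # Re-invoked every 250 ms while it returns a falsy value; raises
        # retrying.RetryError once 10 seconds pass without success.
        return path_gone()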
diff --git a/nova/tests/unit/virt/powervm/test_vif.py b/nova/tests/unit/virt/powervm/test_vif.py
deleted file mode 100644
index 985c48abe5..0000000000
--- a/nova/tests/unit/virt/powervm/test_vif.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from pypowervm import exceptions as pvm_ex
-from pypowervm.wrappers import network as pvm_net
-
-from nova import exception
-from nova.network import model
-from nova import test
-from nova.virt.powervm import vif
-
-
-def cna(mac):
- """Builds a mock Client Network Adapter for unit tests."""
- return mock.Mock(spec=pvm_net.CNA, mac=mac, vswitch_uri='fake_href')
-
-
-class TestVifFunctions(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifFunctions, self).setUp()
-
- self.adpt = mock.Mock()
-
- @mock.patch('nova.virt.powervm.vif.PvmOvsVifDriver')
- def test_build_vif_driver(self, mock_driver):
- # Valid vif type
- driver = vif._build_vif_driver(self.adpt, 'instance', {'type': 'ovs'})
- self.assertEqual(mock_driver.return_value, driver)
-
- mock_driver.reset_mock()
-
- # Fail if no vif type
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif._build_vif_driver, self.adpt, 'instance',
- {'type': None})
- mock_driver.assert_not_called()
-
- # Fail if invalid vif type
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif._build_vif_driver, self.adpt, 'instance',
- {'type': 'bad_type'})
- mock_driver.assert_not_called()
-
- @mock.patch('oslo_serialization.jsonutils.dumps')
- @mock.patch('pypowervm.wrappers.event.Event')
- def test_push_vif_event(self, mock_event, mock_dumps):
- mock_vif = mock.Mock(mac='MAC', href='HREF')
- vif._push_vif_event(self.adpt, 'action', mock_vif, mock.Mock(),
- 'pvm_sea')
- mock_dumps.assert_called_once_with(
- {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC',
- 'type': 'pvm_sea'})
- mock_event.bld.assert_called_once_with(self.adpt, 'HREF',
- mock_dumps.return_value)
- mock_event.bld.return_value.create.assert_called_once_with()
-
- mock_dumps.reset_mock()
- mock_event.bld.reset_mock()
- mock_event.bld.return_value.create.reset_mock()
-
- # Exception reraises
- mock_event.bld.return_value.create.side_effect = IndexError
- self.assertRaises(IndexError, vif._push_vif_event, self.adpt, 'action',
- mock_vif, mock.Mock(), 'pvm_sea')
- mock_dumps.assert_called_once_with(
- {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC',
- 'type': 'pvm_sea'})
- mock_event.bld.assert_called_once_with(self.adpt, 'HREF',
- mock_dumps.return_value)
- mock_event.bld.return_value.create.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.vif._push_vif_event')
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_plug(self, mock_bld_drv, mock_event):
- """Test the top-level plug method."""
- mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}
-
- # 1) With new_vif=True (default)
- vnet = vif.plug(self.adpt, 'instance', mock_vif)
-
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif,
- new_vif=True)
- self.assertEqual(mock_bld_drv.return_value.plug.return_value, vnet)
- mock_event.assert_called_once_with(self.adpt, 'plug', vnet, mock.ANY,
- 'pvm_sea')
-
- # Clean up
- mock_bld_drv.reset_mock()
- mock_bld_drv.return_value.plug.reset_mock()
- mock_event.reset_mock()
-
- # 2) Plug returns None (which it should IRL whenever new_vif=False).
- mock_bld_drv.return_value.plug.return_value = None
- vnet = vif.plug(self.adpt, 'instance', mock_vif, new_vif=False)
-
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif,
- new_vif=False)
- self.assertIsNone(vnet)
- mock_event.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_plug_raises(self, mock_vif_drv):
- """HttpError is converted to VirtualInterfacePlugException."""
- vif_drv = mock.Mock(plug=mock.Mock(side_effect=pvm_ex.HttpError(
- resp=mock.Mock())))
- mock_vif_drv.return_value = vif_drv
- mock_vif = {'address': 'vifaddr'}
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif.plug, 'adap', 'inst', mock_vif,
- new_vif='new_vif')
- mock_vif_drv.assert_called_once_with('adap', 'inst', mock_vif)
- vif_drv.plug.assert_called_once_with(mock_vif, new_vif='new_vif')
-
- @mock.patch('nova.virt.powervm.vif._push_vif_event')
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_unplug(self, mock_bld_drv, mock_event):
- """Test the top-level unplug method."""
- mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}
-
- # 1) With default cna_w_list
- mock_bld_drv.return_value.unplug.return_value = 'vnet_w'
- vif.unplug(self.adpt, 'instance', mock_vif)
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.unplug.assert_called_once_with(
- mock_vif, cna_w_list=None)
- mock_event.assert_called_once_with(self.adpt, 'unplug', 'vnet_w',
- mock.ANY, 'pvm_sea')
- # Clean up
- mock_bld_drv.reset_mock()
- mock_bld_drv.return_value.unplug.reset_mock()
- mock_event.reset_mock()
-
- # 2) With specified cna_w_list
- mock_bld_drv.return_value.unplug.return_value = None
- vif.unplug(self.adpt, 'instance', mock_vif, cna_w_list='cnalist')
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.unplug.assert_called_once_with(
- mock_vif, cna_w_list='cnalist')
- mock_event.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_unplug_raises(self, mock_vif_drv):
-        """HttpError is converted to VirtualInterfaceUnplugException."""
- vif_drv = mock.Mock(unplug=mock.Mock(side_effect=pvm_ex.HttpError(
- resp=mock.Mock())))
- mock_vif_drv.return_value = vif_drv
- mock_vif = {'address': 'vifaddr'}
- self.assertRaises(exception.VirtualInterfaceUnplugException,
- vif.unplug, 'adap', 'inst', mock_vif,
- cna_w_list='cna_w_list')
- mock_vif_drv.assert_called_once_with('adap', 'inst', mock_vif)
- vif_drv.unplug.assert_called_once_with(
- mock_vif, cna_w_list='cna_w_list')
-
-
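test_plug_raises and test_unplug_raises pin the error-translation contract: a pypowervm HttpError escaping plug or unplug must surface as the matching Nova virtual-interface exception. The wrapper pattern, sketched with stand-in exception classes:

    class HttpError(Exception):  # stand-in for pypowervm's HttpError
        pass

    class VirtualInterfacePlugException(Exception):  # stand-in for nova's
        pass

    def plug(driver, vif, new_vif=True):
        try:
            return driver.plug(vif, new_vif=new_vif)
        except HttpError as exc:
            # Chain the original so the root cause stays in the traceback.
            raise VirtualInterfacePlugException(
                'plug failed for %s' % vif.get('address')) from exc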
-class TestVifOvsDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifOvsDriver, self).setUp()
-
- self.adpt = mock.Mock()
- self.inst = mock.MagicMock(uuid='inst_uuid')
- self.drv = vif.PvmOvsVifDriver(self.adpt, self.inst)
-
- @mock.patch('pypowervm.tasks.cna.crt_p2p_cna', autospec=True)
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
-    def test_plug(self, mock_pvm_uuid, mock_mgmt_lpar, mock_p2p_cna):
- # Mock the data
- mock_pvm_uuid.return_value = 'lpar_uuid'
- mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid')
-
- cna_w, trunk_wraps = mock.MagicMock(), [mock.MagicMock()]
- mock_p2p_cna.return_value = cna_w, trunk_wraps
-
- # Run the plug
- network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1450}})
- mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id',
- network=network_model, devname='device')
- self.drv.plug(mock_vif)
-
- # Validate the calls
- ovs_ext_ids = ('iface-id=vif_id,iface-status=active,'
- 'attached-mac=aa:bb:cc:dd:ee:ff,vm-uuid=inst_uuid')
- mock_p2p_cna.assert_called_once_with(
- self.adpt, None, 'lpar_uuid', ['mgmt_uuid'],
- 'NovaLinkVEABridge', configured_mtu=1450, crt_vswitch=True,
- mac_addr='aa:bb:cc:dd:ee:ff', dev_name='device', ovs_bridge='br0',
- ovs_ext_ids=ovs_ext_ids)
-
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- @mock.patch('pypowervm.tasks.cna.find_trunks', autospec=True)
- def test_plug_existing_vif(self, mock_find_trunks, mock_get_cnas,
- mock_pvm_uuid, mock_mgmt_lpar):
- # Mock the data
- t1, t2 = mock.MagicMock(), mock.MagicMock()
- mock_find_trunks.return_value = [t1, t2]
-
- mock_cna = mock.Mock(mac='aa:bb:cc:dd:ee:ff')
- mock_get_cnas.return_value = [mock_cna]
-
- mock_pvm_uuid.return_value = 'lpar_uuid'
-
- mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid')
-
- self.inst = mock.MagicMock(uuid='c2e7ff9f-b9b6-46fa-8716-93bbb795b8b4')
- self.drv = vif.PvmOvsVifDriver(self.adpt, self.inst)
-
- # Run the plug
- network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1500}})
- mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id',
- network=network_model, devname='devname')
- resp = self.drv.plug(mock_vif, new_vif=False)
-
- self.assertIsNone(resp)
-
-        # Validate that trunk.update was invoked for every trunk of the
-        # VIF's CNA
- self.assertTrue(t1.update.called)
- self.assertTrue(t2.update.called)
-
- @mock.patch('pypowervm.tasks.cna.find_trunks')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug(self, mock_get_cnas, mock_find_trunks):
- # Set up the mocks
- mock_cna = mock.Mock(mac='aa:bb:cc:dd:ee:ff')
- mock_get_cnas.return_value = [mock_cna]
-
- t1, t2 = mock.MagicMock(), mock.MagicMock()
- mock_find_trunks.return_value = [t1, t2]
-
- # Call the unplug
- mock_vif = {'address': 'aa:bb:cc:dd:ee:ff',
- 'network': {'bridge': 'br-int'}}
- self.drv.unplug(mock_vif)
-
- # The trunks and the cna should have been deleted
- self.assertTrue(t1.delete.called)
- self.assertTrue(t2.delete.called)
- self.assertTrue(mock_cna.delete.called)
-
-
-class TestVifSeaDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifSeaDriver, self).setUp()
-
- self.adpt = mock.Mock()
- self.inst = mock.Mock()
- self.drv = vif.PvmSeaVifDriver(self.adpt, self.inst)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.cna.crt_cna')
- def test_plug_from_neutron(self, mock_crt_cna, mock_pvm_uuid):
-        """Tests that a VIF can be created. Mocks the Neutron network."""
-
- # Set up the mocks. Look like Neutron
- fake_vif = {'details': {'vlan': 5}, 'network': {'meta': {}},
- 'address': 'aabbccddeeff'}
-
- def validate_crt(adpt, host_uuid, lpar_uuid, vlan, mac_addr=None):
- self.assertIsNone(host_uuid)
- self.assertEqual(5, vlan)
- self.assertEqual('aabbccddeeff', mac_addr)
- return pvm_net.CNA.bld(self.adpt, 5, 'host_uuid',
- mac_addr=mac_addr)
- mock_crt_cna.side_effect = validate_crt
-
- # Invoke
- resp = self.drv.plug(fake_vif)
-
- # Validate (along with validate method above)
- self.assertEqual(1, mock_crt_cna.call_count)
- self.assertIsNotNone(resp)
- self.assertIsInstance(resp, pvm_net.CNA)
-
- def test_plug_existing_vif(self):
- """Tests that a VIF need not be created."""
-
- # Set up the mocks
- fake_vif = {'network': {'meta': {'vlan': 5}},
- 'address': 'aabbccddeeff'}
-
- # Invoke
- resp = self.drv.plug(fake_vif, new_vif=False)
-
- self.assertIsNone(resp)
-
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug_vifs(self, mock_vm_get):
-        """Tests that the VIF can be deleted."""
- # Mock up the CNA response. Two should already exist, the other
- # should not.
- cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
- mock_vm_get.return_value = cnas
-
-        # Run the method. The AABBCCDDEE11 CNA won't be unplugged (it isn't
-        # invoked below) and the last unplug will also just no-op because
-        # it's not on the VM.
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:ff'})
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:22'})
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:33'})
-
- # The delete should have only been called once for each applicable vif.
- # The second CNA didn't have a matching mac so it should be skipped.
- self.assertEqual(1, cnas[0].delete.call_count)
- self.assertEqual(0, cnas[1].delete.call_count)
- self.assertEqual(1, cnas[2].delete.call_count)
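The ovs_ext_ids string asserted in test_plug carries the standard external-ids that let Neutron's OVS agent tie the port back to its Neutron port and guest. Rebuilding it from its parts, with the key order taken from the assertion above:

    def build_ovs_external_ids(vif_id, mac, vm_uuid):
        # iface-id identifies the Neutron port; attached-mac and vm-uuid
        # identify the guest side of the plug.
        return ('iface-id=%s,iface-status=active,'
                'attached-mac=%s,vm-uuid=%s' % (vif_id, mac, vm_uuid))

    assert build_ovs_external_ids(
        'vif_id', 'aa:bb:cc:dd:ee:ff', 'inst_uuid') == (
        'iface-id=vif_id,iface-status=active,'
        'attached-mac=aa:bb:cc:dd:ee:ff,vm-uuid=inst_uuid')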
diff --git a/nova/tests/unit/virt/powervm/test_vm.py b/nova/tests/unit/virt/powervm/test_vm.py
deleted file mode 100644
index ab0f9c35e8..0000000000
--- a/nova/tests/unit/virt/powervm/test_vm.py
+++ /dev/null
@@ -1,563 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_log
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import lpar_builder as lpar_bld
-from pypowervm.utils import uuid as pvm_uuid
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import logical_partition as pvm_lpar
-
-from nova.compute import power_state
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm import vm
-
-
-class TestVMBuilder(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVMBuilder, self).setUp()
-
- self.adpt = mock.MagicMock()
- self.host_w = mock.MagicMock()
- self.lpar_b = vm.VMBuilder(self.host_w, self.adpt)
-
- self.san_lpar_name = self.useFixture(fixtures.MockPatch(
- 'pypowervm.util.sanitize_partition_name_for_api',
- autospec=True)).mock
-
- self.inst = powervm.TEST_INSTANCE
-
- @mock.patch('pypowervm.utils.lpar_builder.DefaultStandardize',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.utils.lpar_builder.LPARBuilder', autospec=True)
- def test_vm_builder(self, mock_lpar_bldr, mock_uuid2pvm, mock_def_stdz):
- inst = mock.Mock()
- inst.configure_mock(
- name='lpar_name', uuid='lpar_uuid',
- flavor=mock.Mock(memory_mb='mem', vcpus='vcpus', extra_specs={}))
- vmb = vm.VMBuilder('host', 'adap')
- mock_def_stdz.assert_called_once_with('host', proc_units_factor=0.1)
- self.assertEqual(mock_lpar_bldr.return_value,
- vmb.lpar_builder(inst))
- self.san_lpar_name.assert_called_once_with('lpar_name')
- mock_uuid2pvm.assert_called_once_with(inst)
- mock_lpar_bldr.assert_called_once_with(
- 'adap', {'name': self.san_lpar_name.return_value,
- 'uuid': mock_uuid2pvm.return_value,
- 'memory': 'mem',
- 'vcpu': 'vcpus',
- 'srr_capability': True}, mock_def_stdz.return_value)
-
- # Assert non-default proc_units_factor.
- mock_def_stdz.reset_mock()
- self.flags(proc_units_factor=0.2, group='powervm')
- vmb = vm.VMBuilder('host', 'adap')
- mock_def_stdz.assert_called_once_with('host', proc_units_factor=0.2)
-
- def test_format_flavor(self):
- """Perform tests against _format_flavor."""
- # convert instance uuid to pypowervm uuid
- # LP 1561128, simplified remote restart is enabled by default
- lpar_attrs = {'memory': 2048,
- 'name': self.san_lpar_name.return_value,
- 'uuid': pvm_uuid.convert_uuid_to_pvm(
- self.inst.uuid).upper(),
- 'vcpu': 1, 'srr_capability': True}
-
- # Test dedicated procs
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true'}
- test_attrs = dict(lpar_attrs, dedicated_proc='true')
-
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test dedicated procs, min/max vcpu and sharing mode
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true',
- 'powervm:dedicated_sharing_mode':
- 'share_idle_procs_active',
- 'powervm:min_vcpu': '1',
- 'powervm:max_vcpu': '3'}
- test_attrs = dict(lpar_attrs,
- dedicated_proc='true',
- sharing_mode='sre idle procs active',
- min_vcpu='1', max_vcpu='3')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test shared proc sharing mode
- self.inst.flavor.extra_specs = {'powervm:uncapped': 'true'}
- test_attrs = dict(lpar_attrs, sharing_mode='uncapped')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test availability priority
- self.inst.flavor.extra_specs = {'powervm:availability_priority': '150'}
- test_attrs = dict(lpar_attrs, avail_priority='150')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test processor compatibility
- self.inst.flavor.extra_specs = {
- 'powervm:processor_compatibility': 'POWER8'}
- test_attrs = dict(lpar_attrs, processor_compatibility='POWER8')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test min, max proc units
- self.inst.flavor.extra_specs = {'powervm:min_proc_units': '0.5',
- 'powervm:max_proc_units': '2.0'}
- test_attrs = dict(lpar_attrs, min_proc_units='0.5',
- max_proc_units='2.0')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test min, max mem
- self.inst.flavor.extra_specs = {'powervm:min_mem': '1024',
- 'powervm:max_mem': '4096'}
- test_attrs = dict(lpar_attrs, min_mem='1024', max_mem='4096')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test remote restart set to false
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false'}
- test_attrs = dict(lpar_attrs, srr_capability=False)
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
-
- # Unhandled powervm: key is ignored
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false',
- 'powervm:something_new': 'foo'}
- test_attrs = dict(lpar_attrs, srr_capability=False)
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
-
- # If we recognize a key, but don't handle it, we raise
- with mock.patch.object(self.lpar_b, '_is_pvm_valid_key',
- return_value=True):
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false',
- 'powervm:something_new': 'foo'}
- self.assertRaises(KeyError, self.lpar_b._format_flavor, self.inst)
-
- @mock.patch('pypowervm.wrappers.shared_proc_pool.SharedProcPool.search')
- def test_spp_pool_id(self, mock_search):
- # The default pool is always zero. Validate the path.
- self.assertEqual(0, self.lpar_b._spp_pool_id('DefaultPool'))
- self.assertEqual(0, self.lpar_b._spp_pool_id(None))
-
- # Further invocations require calls to the adapter. Build a minimal
- # mocked SPP wrapper
- spp = mock.MagicMock()
- spp.id = 1
-
- # Three invocations. First has too many elems. Second has none.
- # Third is just right. :-)
- mock_search.side_effect = [[spp, spp], [], [spp]]
-
- self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id,
- 'fake_name')
- self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id,
- 'fake_name')
-
- self.assertEqual(1, self.lpar_b._spp_pool_id('fake_name'))
-
-
-class TestVM(test.NoDBTestCase):
- def setUp(self):
- super(TestVM, self).setUp()
-
- self.apt = self.useFixture(pvm_fx.AdapterFx(
- traits=pvm_fx.LocalPVMTraits)).adpt
- self.apt.helpers = [pvm_log.log_helper]
-
- self.san_lpar_name = self.useFixture(fixtures.MockPatch(
- 'pypowervm.util.sanitize_partition_name_for_api')).mock
- self.san_lpar_name.side_effect = lambda name: name
- mock_entries = [mock.Mock(), mock.Mock()]
- self.resp = mock.MagicMock()
- self.resp.feed = mock.MagicMock(entries=mock_entries)
-
- self.get_pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
-
- self.inst = powervm.TEST_INSTANCE
-
- def test_translate_vm_state(self):
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('running'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('migrating running'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('starting'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('open firmware'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('shutting down'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('suspending'))
-
- self.assertEqual(power_state.SHUTDOWN,
- vm._translate_vm_state('migrating not active'))
- self.assertEqual(power_state.SHUTDOWN,
- vm._translate_vm_state('not activated'))
-
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('unknown'))
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('hardware discovery'))
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('not available'))
-
- self.assertEqual(power_state.SUSPENDED,
- vm._translate_vm_state('resuming'))
- self.assertEqual(power_state.SUSPENDED,
- vm._translate_vm_state('suspended'))
-
- self.assertEqual(power_state.CRASHED,
- vm._translate_vm_state('error'))
-
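test_translate_vm_state enumerates the PowerVM partition states and the Nova power_state bucket each maps to. The assertions imply a lookup of roughly this shape, reconstructed from the test rather than from the deleted module, with unrecognized states defaulting to NOSTATE:

    from nova.compute import power_state

    PVM_TO_NOVA_STATE = {
        'running': power_state.RUNNING,
        'migrating running': power_state.RUNNING,
        'starting': power_state.RUNNING,
        'open firmware': power_state.RUNNING,
        'shutting down': power_state.RUNNING,
        'suspending': power_state.RUNNING,
        'migrating not active': power_state.SHUTDOWN,
        'not activated': power_state.SHUTDOWN,
        'resuming': power_state.SUSPENDED,
        'suspended': power_state.SUSPENDED,
        'error': power_state.CRASHED,
    }

    def translate_vm_state(pvm_state):
        # 'unknown', 'hardware discovery' and 'not available' all fall
        # through to NOSTATE.
        return PVM_TO_NOVA_STATE.get(pvm_state, power_state.NOSTATE)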
- @mock.patch('pypowervm.wrappers.logical_partition.LPAR', autospec=True)
- def test_get_lpar_names(self, mock_lpar):
- inst1 = mock.Mock()
- inst1.configure_mock(name='inst1')
- inst2 = mock.Mock()
- inst2.configure_mock(name='inst2')
- mock_lpar.search.return_value = [inst1, inst2]
- self.assertEqual({'inst1', 'inst2'}, set(vm.get_lpar_names('adap')))
- mock_lpar.search.assert_called_once_with(
- 'adap', is_mgmt_partition=False)
-
- @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True)
- def test_dlt_lpar(self, mock_vterm):
-        """Tests deleting an LPAR."""
- vm.delete_lpar(self.apt, 'inst')
- self.get_pvm_uuid.assert_called_once_with('inst')
- self.apt.delete.assert_called_once_with(
- pvm_lpar.LPAR.schema_type, root_id=self.get_pvm_uuid.return_value)
- self.assertEqual(1, mock_vterm.call_count)
-
- # Test Failure Path
- # build a mock response body with the expected HSCL msg
- resp = mock.Mock()
- resp.body = 'error msg: HSCL151B more text'
- self.apt.delete.side_effect = pvm_exc.Error(
- 'Mock Error Message', response=resp)
-
- # Reset counters
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- self.assertRaises(pvm_exc.Error, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test HttpError 404
- resp.status = 404
- self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp)
- vm.delete_lpar(self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test Other HttpError
- resp.status = 111
- self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp)
- self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test HttpError 404 closing vterm
- resp.status = 404
- mock_vterm.side_effect = pvm_exc.HttpError(resp=resp)
- vm.delete_lpar(self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(0, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test Other HttpError closing vterm
- resp.status = 111
- mock_vterm.side_effect = pvm_exc.HttpError(resp=resp)
- self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(0, self.apt.delete.call_count)
-
- @mock.patch('nova.virt.powervm.vm.VMBuilder', autospec=True)
- @mock.patch('pypowervm.utils.validation.LPARWrapperValidator',
- autospec=True)
- def test_crt_lpar(self, mock_vld, mock_vmbldr):
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true'}
- mock_bldr = mock.Mock(spec=lpar_bld.LPARBuilder)
- mock_vmbldr.return_value.lpar_builder.return_value = mock_bldr
- mock_pend_lpar = mock.create_autospec(pvm_lpar.LPAR, instance=True)
- mock_bldr.build.return_value = mock_pend_lpar
-
- vm.create_lpar(self.apt, 'host', self.inst)
- mock_vmbldr.assert_called_once_with('host', self.apt)
- mock_vmbldr.return_value.lpar_builder.assert_called_once_with(
- self.inst)
- mock_bldr.build.assert_called_once_with()
- mock_vld.assert_called_once_with(mock_pend_lpar, 'host')
- mock_vld.return_value.validate_all.assert_called_once_with()
- mock_pend_lpar.create.assert_called_once_with(parent='host')
-
-        # Verify that LPAR creation fails with an invalid name specification
- mock_vmbldr.side_effect = lpar_bld.LPARBuilderException("Invalid Name")
- self.assertRaises(exception.BuildAbortException,
- vm.create_lpar, self.apt, 'host', self.inst)
-
- # HttpError
- mock_vmbldr.side_effect = pvm_exc.HttpError(mock.Mock())
- self.assertRaises(exception.PowerVMAPIFailed,
- vm.create_lpar, self.apt, 'host', self.inst)
-
- @mock.patch('pypowervm.wrappers.logical_partition.LPAR', autospec=True)
- def test_get_instance_wrapper(self, mock_lpar):
- resp = mock.Mock(status=404)
- mock_lpar.get.side_effect = pvm_exc.Error('message', response=resp)
- self.assertRaises(exception.InstanceNotFound, vm.get_instance_wrapper,
- self.apt, self.inst)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_on(self, mock_wrap, mock_lock, mock_power_on):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- vm.power_on(None, self.inst)
- mock_power_on.assert_called_once_with(entry, None)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- mock_power_on.reset_mock()
- mock_lock.reset_mock()
-
- stop_states = [
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING]
-
- for stop_state in stop_states:
- entry.state = stop_state
- vm.power_on(None, self.inst)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_lock.reset_mock()
- self.assertEqual(0, mock_power_on.call_count)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_on_negative(self, mock_wrp, mock_power_on):
- mock_wrp.return_value = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
-
- # Convertible (PowerVM) exception
- mock_power_on.side_effect = pvm_exc.VMPowerOnFailure(
- reason='Something bad', lpar_nm='TheLPAR')
- self.assertRaises(exception.InstancePowerOnFailure,
- vm.power_on, None, self.inst)
-
- # Non-pvm error raises directly
- mock_power_on.side_effect = ValueError()
- self.assertRaises(ValueError, vm.power_on, None, self.inst)
-
- @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True)
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_off(self, mock_wrap, mock_lock, mock_power_off, mock_pop):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- vm.power_off(None, self.inst)
- self.assertEqual(0, mock_power_off.call_count)
- self.assertEqual(0, mock_pop.stop.call_count)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- stop_states = [
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING]
- for stop_state in stop_states:
- entry.state = stop_state
- mock_power_off.reset_mock()
- mock_pop.stop.reset_mock()
- mock_lock.reset_mock()
- vm.power_off(None, self.inst)
- mock_power_off.assert_called_once_with(entry)
- self.assertEqual(0, mock_pop.stop.call_count)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_power_off.reset_mock()
- mock_lock.reset_mock()
- vm.power_off(None, self.inst, force_immediate=True, timeout=5)
- self.assertEqual(0, mock_power_off.call_count)
- mock_pop.stop.assert_called_once_with(
- entry, opts=mock.ANY, timeout=5)
- self.assertEqual('PowerOff(immediate=true, operation=shutdown)',
- str(mock_pop.stop.call_args[1]['opts']))
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_off_negative(self, mock_wrap, mock_power_off):
- """Negative tests."""
- mock_wrap.return_value = mock.Mock(state=pvm_bp.LPARState.RUNNING)
-
- # Raise the expected pypowervm exception
- mock_power_off.side_effect = pvm_exc.VMPowerOffFailure(
- reason='Something bad.', lpar_nm='TheLPAR')
- # We should get a valid Nova exception that the compute manager expects
- self.assertRaises(exception.InstancePowerOffFailure,
- vm.power_off, None, self.inst)
-
- # Non-pvm error raises directly
- mock_power_off.side_effect = ValueError()
- self.assertRaises(ValueError, vm.power_off, None, self.inst)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_reboot(self, mock_wrap, mock_lock, mock_pop, mock_pwroff,
- mock_pwron):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- # No power_off
- vm.reboot('adap', self.inst, False)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_wrap.assert_called_once_with('adap', self.inst)
- mock_pwron.assert_called_once_with(entry, None)
- self.assertEqual(0, mock_pwroff.call_count)
- self.assertEqual(0, mock_pop.stop.call_count)
-
- mock_pwron.reset_mock()
-
- # power_off (no power_on) hard
- entry.state = pvm_bp.LPARState.RUNNING
- vm.reboot('adap', self.inst, True)
- self.assertEqual(0, mock_pwron.call_count)
- self.assertEqual(0, mock_pwroff.call_count)
- mock_pop.stop.assert_called_once_with(entry, opts=mock.ANY)
- self.assertEqual(
- 'PowerOff(immediate=true, operation=shutdown, restart=true)',
- str(mock_pop.stop.call_args[1]['opts']))
-
- mock_pop.reset_mock()
-
- # power_off (no power_on) soft
- entry.state = pvm_bp.LPARState.RUNNING
- vm.reboot('adap', self.inst, False)
- self.assertEqual(0, mock_pwron.call_count)
- mock_pwroff.assert_called_once_with(entry, restart=True)
- self.assertEqual(0, mock_pop.stop.call_count)
-
- mock_pwroff.reset_mock()
-
- # PowerVM error is converted
- mock_pop.stop.side_effect = pvm_exc.TimeoutError("Timed out")
- self.assertRaises(exception.InstanceRebootFailure,
- vm.reboot, 'adap', self.inst, True)
-
- # Non-PowerVM error is raised directly
- mock_pwroff.side_effect = ValueError
- self.assertRaises(ValueError, vm.reboot, 'adap', self.inst, False)
-
- @mock.patch('oslo_serialization.jsonutils.loads')
- def test_get_vm_qp(self, mock_loads):
- self.apt.helpers = ['helper1', pvm_log.log_helper, 'helper3']
-
- # Defaults
- self.assertEqual(mock_loads.return_value,
- vm.get_vm_qp(self.apt, 'lpar_uuid'))
- self.apt.read.assert_called_once_with(
- 'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
- suffix_parm=None)
- mock_loads.assert_called_once_with(self.apt.read.return_value.body)
-
- self.apt.read.reset_mock()
- mock_loads.reset_mock()
-
- # Specific qprop, no logging errors
- self.assertEqual(mock_loads.return_value,
- vm.get_vm_qp(self.apt, 'lpar_uuid', qprop='Prop',
- log_errors=False))
- self.apt.read.assert_called_once_with(
- 'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
- suffix_parm='Prop', helpers=['helper1', 'helper3'])
-
- resp = mock.MagicMock()
- resp.status = 404
- self.apt.read.side_effect = pvm_exc.HttpError(resp)
- self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- self.apt.read.side_effect = pvm_exc.Error("message", response=None)
- self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- resp.status = 500
- self.apt.read.side_effect = pvm_exc.Error("message", response=resp)
- self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.wrappers.network.CNA.search')
- @mock.patch('pypowervm.wrappers.network.CNA.get')
- def test_get_cnas(self, mock_get, mock_search, mock_uuid):
- # No kwargs: get
- self.assertEqual(mock_get.return_value, vm.get_cnas(self.apt, 'inst'))
- mock_uuid.assert_called_once_with('inst')
- mock_get.assert_called_once_with(self.apt, parent_type=pvm_lpar.LPAR,
- parent_uuid=mock_uuid.return_value)
- mock_search.assert_not_called()
- # With kwargs: search
- mock_get.reset_mock()
- mock_uuid.reset_mock()
- self.assertEqual(mock_search.return_value, vm.get_cnas(
- self.apt, 'inst', one=2, three=4))
- mock_uuid.assert_called_once_with('inst')
- mock_search.assert_called_once_with(
- self.apt, parent_type=pvm_lpar.LPAR,
- parent_uuid=mock_uuid.return_value, one=2, three=4)
- mock_get.assert_not_called()
-
- def test_norm_mac(self):
- EXPECTED = "12:34:56:78:90:ab"
- self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:ab"))
- self.assertEqual(EXPECTED, vm.norm_mac("1234567890ab"))
- self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:AB"))
- self.assertEqual(EXPECTED, vm.norm_mac("1234567890AB"))
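test_norm_mac covers both accepted input shapes, with and without separators, in either case. A sketch that satisfies all four assertions:

    def norm_mac(mac):
        # Lower-case, strip separators, then re-join as colon-separated
        # octet pairs.
        raw = mac.lower().replace(':', '')
        return ':'.join(raw[i:i + 2] for i in range(0, len(raw), 2))

    assert norm_mac('1234567890AB') == '12:34:56:78:90:ab'
    assert norm_mac('12:34:56:78:90:AB') == '12:34:56:78:90:ab'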
diff --git a/nova/tests/unit/virt/powervm/volume/__init__.py b/nova/tests/unit/virt/powervm/volume/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/volume/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py b/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py
deleted file mode 100644
index 2db5b1a663..0000000000
--- a/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from pypowervm import const as pvm_const
-from pypowervm.tasks import hdisk
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stor
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import conf as cfg
-from nova import exception as exc
-from nova import test
-from nova.virt.powervm.volume import fcvscsi
-
-CONF = cfg.CONF
-
-I_WWPN_1 = '21000024FF649104'
-I_WWPN_2 = '21000024FF649105'
-
-
-class TestVSCSIAdapter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVSCSIAdapter, self).setUp()
-
- self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
- self.wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- self.ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.ftsk.configure_mock(wrapper_tasks={'vios_uuid': self.wtsk})
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- def init_vol_adpt(mock_pvm_uuid):
- con_info = {
- 'serial': 'id',
- 'data': {
- 'initiator_target_map': {
- I_WWPN_1: ['t1'],
- I_WWPN_2: ['t2', 't3']
- },
- 'target_lun': '1',
- 'volume_id': 'a_volume_identifier',
- },
- }
- mock_inst = mock.MagicMock()
- mock_pvm_uuid.return_value = '1234'
-
- return fcvscsi.FCVscsiVolumeAdapter(
- self.adpt, mock_inst, con_info, stg_ftsk=self.ftsk)
- self.vol_drv = init_vol_adpt()
-
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
- def test_reset_stg_ftsk(self, mock_vios, mock_ftsk):
- self.vol_drv.reset_stg_ftsk('stg_ftsk')
- self.assertEqual('stg_ftsk', self.vol_drv.stg_ftsk)
-
- mock_vios.getter.return_value = 'getter'
- mock_ftsk.return_value = 'local_feed_task'
- self.vol_drv.reset_stg_ftsk()
- self.assertEqual('local_feed_task', self.vol_drv.stg_ftsk)
- mock_vios.getter.assert_called_once_with(
- self.adpt, xag=[pvm_const.XAG.VIO_SMAP])
- mock_ftsk.assert_called_once_with('local_feed_task', 'getter')
-
- @mock.patch('pypowervm.tasks.partition.get_physical_wwpns', autospec=True)
- def test_wwpns(self, mock_vio_wwpns):
- mock_vio_wwpns.return_value = ['aa', 'bb']
- wwpns = fcvscsi.wwpns(self.adpt)
- self.assertListEqual(['aa', 'bb'], wwpns)
- mock_vio_wwpns.assert_called_once_with(self.adpt, force_refresh=False)
-
- def test_set_udid(self):
- # Mock connection info
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = None
-
- # Set the UDID
- self.vol_drv._set_udid('udid')
-
- # Verify
- self.assertEqual('udid',
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY])
-
- def test_get_udid(self):
- # Set the value to retrieve
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = 'udid'
- retrieved_udid = self.vol_drv._get_udid()
- # Check key found
- self.assertEqual('udid', retrieved_udid)
-
- # Check key not found
- self.vol_drv.connection_info['data'].pop(fcvscsi.UDID_KEY)
- retrieved_udid = self.vol_drv._get_udid()
- # Check key not found
- self.assertIsNone(retrieved_udid)
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- def test_attach_volume(self, mock_feed_task, mock_get_wrap):
- mock_lpar_wrap = mock.MagicMock()
- mock_lpar_wrap.can_modify_io.return_value = True, None
- mock_get_wrap.return_value = mock_lpar_wrap
- mock_attach_ftsk = mock_feed_task.return_value
-
- # Pass if all vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': True}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.vol_drv.attach_volume()
- mock_feed_task.assert_called_once()
- mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
- mock_attach_ftsk.execute.assert_called_once()
- self.ftsk.execute.assert_called_once()
-
- mock_feed_task.reset_mock()
- mock_attach_ftsk.reset_mock()
- self.ftsk.reset_mock()
-
- # Pass if 1 vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': False}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.vol_drv.attach_volume()
- mock_feed_task.assert_called_once()
- mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
- mock_attach_ftsk.execute.assert_called_once()
- self.ftsk.execute.assert_called_once()
-
- # Raise if no vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
- 'vios2': {'vio_modified': False}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)
-
- # Raise if vm in invalid state
- mock_lpar_wrap.can_modify_io.return_value = False, None
- self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_set_udid')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_append_mapping')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_discover_volume_on_vios')
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- def test_attach_volume_to_vio(self, mock_good_disc, mock_disc_vol,
- mock_add_map, mock_set_udid):
- # Setup mocks
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_disc_vol.return_value = 'status', 'devname', 'udid'
-
- # Bad discovery
- mock_good_disc.return_value = False
- ret = self.vol_drv._attach_volume_to_vio(mock_vios)
- self.assertFalse(ret)
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
-
- # Good discovery
- mock_good_disc.return_value = True
- ret = self.vol_drv._attach_volume_to_vio(mock_vios)
- self.assertTrue(ret)
- mock_add_map.assert_called_once_with(
- 'uuid', 'devname', tag='a_volume_identifier')
- mock_set_udid.assert_called_once_with('udid')
-
- def test_extend_volume(self):
- # Ensure the method is implemented
- self.vol_drv.extend_volume()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG')
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- @mock.patch('pypowervm.tasks.hdisk.discover_hdisk', autospec=True)
- @mock.patch('pypowervm.tasks.hdisk.build_itls', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_get_hdisk_itls')
- def test_discover_volume_on_vios(self, mock_get_itls, mock_build_itls,
- mock_disc_hdisk, mock_good_disc,
- mock_log):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_get_itls.return_value = 'v_wwpns', 't_wwpns', 'lun'
- mock_build_itls.return_value = 'itls'
- mock_disc_hdisk.return_value = 'status', 'devname', 'udid'
-
- # Good discovery
- mock_good_disc.return_value = True
- status, devname, udid = self.vol_drv._discover_volume_on_vios(
- mock_vios)
- self.assertEqual(mock_disc_hdisk.return_value[0], status)
- self.assertEqual(mock_disc_hdisk.return_value[1], devname)
- self.assertEqual(mock_disc_hdisk.return_value[2], udid)
- mock_get_itls.assert_called_once_with(mock_vios)
- mock_build_itls.assert_called_once_with('v_wwpns', 't_wwpns', 'lun')
- mock_disc_hdisk.assert_called_once_with(self.adpt, 'uuid', 'itls')
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_log.info.assert_called_once()
- mock_log.warning.assert_not_called()
-
- mock_log.reset_mock()
-
-        # Bad discovery, status is not 'device in use'
- mock_good_disc.return_value = False
- self.vol_drv._discover_volume_on_vios(mock_vios)
- mock_log.warning.assert_not_called()
- mock_log.info.assert_not_called()
-
-        # Bad discovery, status is 'device in use'
- mock_disc_hdisk.return_value = (hdisk.LUAStatus.DEVICE_IN_USE, 'dev',
- 'udid')
- self.vol_drv._discover_volume_on_vios(mock_vios)
- mock_log.warning.assert_called_once()
-
- def test_get_hdisk_itls(self):
- """Validates the _get_hdisk_itls method."""
-
- mock_vios = mock.MagicMock()
- mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_1]
-
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([I_WWPN_1], i_wwpn)
- self.assertListEqual(['t1'], t_wwpns)
- self.assertEqual('1', lun)
-
- mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_2]
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([I_WWPN_2], i_wwpn)
- self.assertListEqual(['t2', 't3'], t_wwpns)
-
- mock_vios.get_active_pfc_wwpns.return_value = ['12345']
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([], i_wwpn)
-
- @mock.patch('pypowervm.wrappers.storage.PV', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- def test_add_append_mapping(self, mock_add_map, mock_bld_map, mock_pv):
- def test_afs(add_func):
- mock_vios = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_add_map.return_value, add_func(mock_vios))
- mock_pv.bld.assert_called_once_with(self.adpt, 'devname', tag=None)
- mock_bld_map.assert_called_once_with(
- None, mock_vios, self.vol_drv.vm_uuid,
- mock_pv.bld.return_value)
- mock_add_map.assert_called_once_with(
- mock_vios, mock_bld_map.return_value)
-
- self.wtsk.add_functor_subtask.side_effect = test_afs
- self.vol_drv._add_append_mapping('vios_uuid', 'devname')
- self.wtsk.add_functor_subtask.assert_called_once()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG.warning')
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- def test_detach_volume(self, mock_feed_task, mock_get_wrap, mock_log):
- mock_lpar_wrap = mock.MagicMock()
- mock_lpar_wrap.can_modify_io.return_value = True, None
- mock_get_wrap.return_value = mock_lpar_wrap
- mock_detach_ftsk = mock_feed_task.return_value
-
- # Multiple vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': True}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_feed_task.assert_called_once()
- mock_detach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._detach_vol_for_vio, provides='vio_modified',
- flag_update=False)
- mock_detach_ftsk.execute.assert_called_once_with()
- self.ftsk.execute.assert_called_once_with()
- mock_log.assert_not_called()
-
- # 1 vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': False}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_log.assert_not_called()
-
-        # No vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
- 'vios2': {'vio_modified': False}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_log.assert_called_once()
-
- # Raise if exception during execute
- mock_detach_ftsk.execute.side_effect = Exception()
- self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)
-
- # Raise if vm in invalid state
- mock_lpar_wrap.can_modify_io.return_value = False, None
- self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)
-
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_discover_volume_on_vios')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_remove_mapping')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_remove_hdisk')
- @mock.patch('nova.virt.powervm.vm.get_vm_qp')
- def test_detach_vol_for_vio(self, mock_get_qp, mock_rm_hdisk, mock_rm_map,
- mock_disc_vol, mock_good_disc):
- # Good detach, bdm data is found
- self.vol_drv._set_udid('udid')
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'vios_uuid'
- mock_vios.hdisk_from_uuid.return_value = 'devname'
- mock_get_qp.return_value = 'part_id'
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_vios.reset_mock()
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Good detach, no udid
- self.vol_drv._set_udid(None)
- mock_disc_vol.return_value = 'status', 'devname', 'udid'
- mock_good_disc.return_value = True
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_not_called()
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_vios.reset_mock()
- mock_disc_vol.reset_mock()
- mock_good_disc.reset_mock()
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Good detach, no device name
- self.vol_drv._set_udid('udid')
- mock_vios.hdisk_from_uuid.return_value = None
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
-        # Bad detach, discovery unsuccessful
- mock_good_disc.return_value = False
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertFalse(ret)
- mock_rm_map.assert_not_called()
- mock_rm_hdisk.assert_not_called()
-
- # Bad detach, exception discovering volume on vios
- mock_disc_vol.side_effect = Exception()
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertFalse(ret)
- mock_rm_map.assert_not_called()
- mock_rm_hdisk.assert_not_called()
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- def test_add_remove_mapping(self, mock_rm_maps, mock_gen_match):
- def test_afs(rm_func):
- mock_vios = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_rm_maps.return_value, rm_func(mock_vios))
- mock_gen_match.assert_called_once_with(
- pvm_stor.PV, names=['devname'])
- mock_rm_maps.assert_called_once_with(
- mock_vios, 'vm_uuid', mock_gen_match.return_value)
-
- self.wtsk.add_functor_subtask.side_effect = test_afs
- self.vol_drv._add_remove_mapping('vm_uuid', 'vios_uuid', 'devname')
- self.wtsk.add_functor_subtask.assert_called_once()
-
- @mock.patch('pypowervm.tasks.hdisk.remove_hdisk', autospec=True)
- @mock.patch('taskflow.task.FunctorTask', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_check_host_mappings')
- def test_add_remove_hdisk(self, mock_check_maps, mock_functask,
- mock_rm_hdisk):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_check_maps.return_value = True
- self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
- mock_functask.assert_not_called()
- self.ftsk.add_post_execute.assert_not_called()
- mock_check_maps.assert_called_once_with(mock_vios, 'devname')
- self.assertEqual(0, mock_rm_hdisk.call_count)
-
- def test_functor_task(rm_hdisk, name=None):
- rm_hdisk()
- return 'functor_task'
-
- mock_check_maps.return_value = False
- mock_functask.side_effect = test_functor_task
- self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
- mock_functask.assert_called_once()
- self.ftsk.add_post_execute.assert_called_once_with('functor_task')
- mock_rm_hdisk.assert_called_once_with(self.adpt, CONF.host,
- 'devname', 'uuid')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- def test_check_host_mappings(self, mock_find_maps, mock_gen_match):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid2'
- mock_v1 = mock.MagicMock(scsi_mappings='scsi_maps_1', uuid='uuid1')
- mock_v2 = mock.MagicMock(scsi_mappings='scsi_maps_2', uuid='uuid2')
- mock_feed = [mock_v1, mock_v2]
- self.ftsk.feed = mock_feed
-
- # Multiple mappings found
- mock_find_maps.return_value = ['map1', 'map2']
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertTrue(ret)
- mock_gen_match.assert_called_once_with(pvm_stor.PV, names=['devname'])
- mock_find_maps.assert_called_once_with('scsi_maps_2', None,
- mock_gen_match.return_value)
-
- # One mapping found
- mock_find_maps.return_value = ['map1']
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertFalse(ret)
-
- # No mappings found
- mock_find_maps.return_value = []
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertFalse(ret)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index aff6c5ef19..703f15967c 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import encryptors
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -35,6 +36,9 @@ ATTACHMENT_ID = uuids.attachment_id
class TestDriverBlockDevice(test.NoDBTestCase):
+ # os-brick>=5.1 now uses external file system locks instead of internal
+    # locks, so we need to set up locking
+ REQUIRES_LOCKING = True
# This is used to signal if we're dealing with a new style volume
# attachment (Cinder v3.44 flow).
attachment_id = None
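The REQUIRES_LOCKING flag added above matters because external locks are cross-process file locks rather than in-process semaphores. A rough sketch of an external lock with oslo.concurrency (the lock name and lock_path are illustrative assumptions, not what os-brick actually uses):

    from oslo_concurrency import lockutils

    # external=True serializes via a lock file on disk, so concurrently
    # running test processes need a writable, isolated lock_path.
    @lockutils.synchronized('connect_volume', external=True,
                            lock_path='/tmp/fake-lock-dir')
    def guarded_connect():
        pass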
@@ -45,7 +49,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
- 'volblank': driver_block_device.DriverVolBlankBlockDevice
+ 'volblank': driver_block_device.DriverVolBlankBlockDevice,
+ 'image': driver_block_device.DriverImageBlockDevice,
}
swap_bdm_dict = block_device.BlockDeviceDict(
@@ -74,14 +79,22 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
- 'boot_index': -1})
+ 'boot_index': -1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
- 'disk_bus': 'scsi'}
+ 'disk_bus': 'scsi',
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
@@ -206,6 +219,35 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'boot_index': -1,
'volume_type': None}
+ image_bdm_dict = block_device.BlockDeviceDict(
+ {'id': 7, 'instance_uuid': uuids.instance,
+ 'device_name': '/dev/vda',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None})
+
+ image_driver_bdm = {
+ 'device_name': '/dev/vda',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'disk_bus': 'virtio',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None}
+
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = mock.MagicMock(autospec=cinder.API)
@@ -215,6 +257,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
+ self.image_bdm = fake_block_device.fake_bdm_object(
+ self.context, self.image_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
@@ -333,6 +377,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
if field == 'attachment_id':
# Must set UUID values on UUID fields.
fake_value = ATTACHMENT_ID
+ elif isinstance(test_bdm._bdm_obj.fields[fld],
+ fields.UUIDField):
+ # Generically handle other UUID fields.
+ fake_value = uuids.fake_value
else:
fake_value = 'fake_changed_value'
test_bdm[field] = fake_value
@@ -373,6 +421,20 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
+ def test_driver_image_block_device(self):
+ self._test_driver_device("image")
+
+ def test_driver_image_default_size(self):
+ self._test_driver_default_size('image')
+
+ def test_driver_image_block_device_destination_not_local(self):
+ self._test_driver_device('image')
+ bdm = self.image_bdm_dict.copy()
+ bdm['destination_type'] = 'volume'
+ self.assertRaises(driver_block_device._InvalidType,
+ self.driver_classes['image'],
+ fake_block_device.fake_bdm_object(self.context, bdm))
+
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
@@ -402,7 +464,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 3)
self.assertEqual('fake-snapshot-id-1', test_bdm.get('snapshot_id'))
- def test_driver_image_block_device(self):
+ def test_driver_volume_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
@@ -412,7 +474,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 1)
self.assertEqual('fake-image-id-1', test_bdm.get('image_id'))
- def test_driver_image_block_device_destination_local(self):
+ def test_driver_volume_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
@@ -433,24 +495,23 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
- with mock.patch.object(self.volume_api, 'delete') as vol_delete:
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
-
- if delete_on_termination and delete_fail:
- vol_delete.side_effect = Exception()
-
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm._call_wait_func,
- context=self.context,
- wait_func=wait_func,
- volume_api=self.volume_api,
- volume_id='fake-id')
- self.assertEqual(delete_on_termination, vol_delete.called)
+ if delete_on_termination and delete_fail:
+ self.volume_api.delete.side_effect = Exception()
+
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm._call_wait_func,
+ context=self.context,
+ wait_func=wait_func,
+ volume_api=self.volume_api,
+ volume_id='fake-id')
+ self.assertEqual(delete_on_termination, self.volume_api.delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
@@ -483,25 +544,24 @@ class TestDriverBlockDevice(test.NoDBTestCase):
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
+ if delete_attachment_raises:
+ self.volume_api.attachment_delete.side_effect = (
+ delete_attachment_raises)
+
+ self.virt_driver.get_volume_connector.return_value = connector
+
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
- mock.patch.object(self.virt_driver, 'get_volume_connector',
- return_value=connector),
mock.patch('os_brick.initiator.utils.guard_connection'),
- mock.patch.object(self.volume_api, 'attachment_delete'),
- ) as (mock_get_volume, mock_get_connector, mock_guard,
- vapi_attach_del):
-
- if delete_attachment_raises:
- vapi_attach_del.side_effect = delete_attachment_raises
+ ) as (mock_get_volume, mock_guard):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
mock_guard.assert_called_once_with(volume)
- vapi_attach_del.assert_called_once_with(elevated_context,
- attachment_id)
+ self.volume_api.attachment_delete.assert_called_once_with(
+ elevated_context, attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
@@ -952,31 +1012,28 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_get_snap, vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_get_snap.assert_called_once_with(
- self.context, 'fake-snapshot-id-1')
- vol_create.assert_called_once_with(
- self.context, 3, '', '', availability_zone=None,
- snapshot=snapshot, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.get_snapshot.assert_called_once_with(
+ self.context, 'fake-snapshot-id-1')
+ self.volume_api.create.assert_called_once_with(
+ self.context, 3, '', '', availability_zone=None,
+ snapshot=snapshot, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
@@ -984,19 +1041,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attach, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
        # Make sure these are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_snapshot_attach_no_volume_and_no_volume_type(self):
bdm = self.driver_classes['volsnapshot'](self.volsnapshot_bdm)
@@ -1006,15 +1061,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
original_volume = {'id': uuids.original_volume_id,
'volume_type_id': 'original_volume_type'}
new_volume = {'id': uuids.new_volume_id}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'get',
- return_value=original_volume),
- mock.patch.object(self.volume_api, 'create',
- return_value=new_volume),
- ) as (mock_attach, mock_get_snapshot, mock_get, mock_create):
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.get.return_value = original_volume
+ self.volume_api.create.return_value = new_volume
+ with mock.patch.object(self.driver_classes["volume"], "attach"):
bdm.volume_id = None
bdm.volume_type = None
bdm.attach(self.context, instance, self.volume_api,
@@ -1022,10 +1072,11 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# Assert that the original volume type is fetched, stored within
# the bdm and then used to create the new snapshot based volume.
- mock_get.assert_called_once_with(self.context,
- uuids.original_volume_id)
+ self.volume_api.get.assert_called_once_with(
+ self.context, uuids.original_volume_id)
self.assertEqual('original_volume_type', bdm.volume_type)
- mock_create.assert_called_once_with(self.context, bdm.volume_size,
+ self.volume_api.create.assert_called_once_with(
+ self.context, bdm.volume_size,
'', '', volume_type='original_volume_type', snapshot=snapshot,
availability_zone=None)
@@ -1097,27 +1148,25 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, 1, '', '', image_id=image['id'],
- availability_zone=None, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.create.assert_called_once_with(
+ self.context, 1, '', '', image_id=image['id'],
+ availability_zone=None, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
@@ -1125,19 +1174,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attch, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
- mock_attch.assert_called_once_with(
+ mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
        # Make sure these are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1149,30 +1196,26 @@ class TestDriverBlockDevice(test.NoDBTestCase):
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, test_bdm.volume_size,
- '%s-blank-vol' % uuids.uuid,
- '', volume_type=None, availability_zone=None)
- vol_delete.assert_called_once_with(
- self.context, volume['id'])
+ self.volume_api.create.assert_called_once_with(
+ self.context, test_bdm.volume_size,
+ '%s-blank-vol' % uuids.uuid,
+ '', volume_type=None, availability_zone=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1278,12 +1321,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
- self.ephemeral_bdm, self.volsnapshot_bdm):
+ self.ephemeral_bdm, self.volsnapshot_bdm, self.image_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
- local_image = self.volimage_bdm_dict.copy()
- local_image['destination_type'] = 'local'
- self.assertFalse(driver_block_device.is_implemented(
- fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
@@ -1481,13 +1520,9 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'display_name': 'fake-snapshot-vol'}
self.stub_volume_create(volume)
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(volume_class, 'attach')
- ) as (
- vol_get_snap, vol_attach
- ):
+ self.volume_api.get_snapshot.return_value = snapshot
+
+ with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
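The recurring refactor in the hunks above replaces nested mock.patch.object context managers with direct configuration of the MagicMock volume API created in setUp. In miniature (volume_api here is a stand-in mock, not the real nova.volume.cinder.API):

    from unittest import mock

    volume_api = mock.MagicMock()
    volume_api.create.return_value = {'id': 'fake-volume-id'}
    volume_api.delete.side_effect = Exception('delete failed')

    # No patching needed: the attribute is already a mock, so return
    # values, side effects and assertions hang directly off it.
    assert volume_api.create('ctxt', 1)['id'] == 'fake-volume-id'
    volume_api.create.assert_called_once_with('ctxt', 1)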
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 6f552255d4..ab51a3e26c 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -14,11 +14,12 @@
import collections
import copy
-import ddt
+from unittest import mock
-import mock
+import ddt
import testtools
+import nova.conf
from nova import exception
from nova import objects
from nova.objects import fields
@@ -28,6 +29,8 @@ from nova.tests.unit import fake_pci_device_pools as fake_pci
from nova.tests.unit.image.fake import fake_image_obj
from nova.virt import hardware as hw
+CONF = nova.conf.CONF
+
class InstanceInfoTests(test.NoDBTestCase):
@@ -2020,6 +2023,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=0),
@@ -2033,6 +2037,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=1,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=64),
@@ -2046,6 +2051,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=2,
cpu_usage=0,
memory_usage=0,
+ socket=2,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=16)],
@@ -2127,6 +2133,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=160,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=32),
@@ -2167,6 +2174,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2178,6 +2186,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2189,6 +2198,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2246,7 +2256,7 @@ class NUMATopologyTest(test.NoDBTestCase):
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
- def test_host_usage_culmulative_with_free(self):
+ def test_host_usage_cumulative_with_free(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
@@ -2255,6 +2265,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=2,
memory_usage=512,
+ socket=0,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
siblings=[set([0]), set([1]), set([2]), set([3])],
@@ -2266,6 +2277,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=1,
memory_usage=512,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2277,6 +2289,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2327,6 +2340,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=512, used=128,
@@ -2339,6 +2353,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=1048576, total=5, used=2,
@@ -2603,6 +2618,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2613,6 +2629,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2635,45 +2652,45 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
def test_get_fitting_success_no_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3)
+ self.host, self.instance3, {})
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
def test_get_fitting_success_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance.cells[0].id)
def test_get_fitting_fails_no_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance)
- def test_get_fitting_culmulative_fails_limits(self):
+ def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance2)
- def test_get_fitting_culmulative_success_limits(self):
+ def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
self.assertEqual(2, fitted_instance2.cells[0].id)
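Every call updated in this file threads a new mapping through as the third positional argument of numa_fit_instance_to_host, passed as {} in these tests. A signature-level sketch of the calling convention; the parameter name is an assumption about the real nova.virt.hardware API, not taken from this diff:

    def numa_fit_instance_to_host(
            host_topology, instance_topology, provider_mappings,
            limits=None, pci_requests=None, pci_stats=None):
        """Sketch only: pins the argument order used by the tests above."""
        raise NotImplementedError

    # Call sites therefore read:
    #   numa_fit_instance_to_host(host_topo, inst_topo, {}, limits=limits)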
@@ -2688,7 +2705,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
mock_supports.assert_called_once_with(
@@ -2705,7 +2722,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsNone(fitted_instance)
mock_supports.assert_has_calls([
@@ -2722,6 +2739,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
                'support_requests', return_value=True):
fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsInstance(fitted_instance1,
@@ -2737,6 +2755,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsNone(fitted_instance1)
@@ -2753,9 +2772,9 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# the PCI device is found on host cell 1
pci_stats = _create_pci_stats(1)
- # ...threfore an instance without a PCI device should get host cell 2
+ # ...therefore an instance without a PCI device should get host cell 2
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
# TODO(sfinucan): We should be comparing this against the HOST cell
self.assertEqual(2, instance_topology.cells[0].id)
@@ -2763,9 +2782,9 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# the PCI device is now found on host cell 2
pci_stats = _create_pci_stats(2)
- # ...threfore an instance without a PCI device should get host cell 1
+ # ...therefore an instance without a PCI device should get host cell 1
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
self.assertEqual(1, instance_topology.cells[0].id)
@@ -2811,6 +2830,54 @@ class NumberOfSerialPortsTest(test.NoDBTestCase):
flavor, image_meta)
+class VirtLockMemoryTestCase(test.NoDBTestCase):
+ def _test_get_locked_memory_constraint(self, spec=None, props=None):
+ flavor = objects.Flavor(vcpus=16, memory_mb=2048,
+ extra_specs=spec or {})
+ image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
+ return hw.get_locked_memory_constraint(flavor, image_meta)
+
+ def test_get_locked_memory_constraint_image(self):
+ self.assertTrue(
+ self._test_get_locked_memory_constraint(
+ spec={"hw:mem_page_size": "small"},
+ props={"hw_locked_memory": "True"}))
+
+ def test_get_locked_memory_conflict(self):
+ ex = self.assertRaises(
+ exception.FlavorImageLockedMemoryConflict,
+ self._test_get_locked_memory_constraint,
+ spec={
+ "hw:locked_memory": "False",
+ "hw:mem_page_size": "small"
+ },
+ props={"hw_locked_memory": "True"}
+ )
+ ex_msg = ("locked_memory value in image (True) and flavor (False) "
+ "conflict. A consistent value is expected if both "
+ "specified.")
+ self.assertEqual(ex_msg, str(ex))
+
+ def test_get_locked_memory_constraint_forbidden(self):
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {"hw:locked_memory": "True"})
+
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {},
+ {"hw_locked_memory": "True"})
+
+ def test_get_locked_memory_constraint_image_false(self):
+ # False value of locked_memory will not raise LockMemoryForbidden
+ self.assertFalse(
+ self._test_get_locked_memory_constraint(
+ spec=None,
+ props={"hw_locked_memory": "False"}))
+
+
class VirtMemoryPagesTestCase(test.NoDBTestCase):
def test_cell_instance_pagesize(self):
cell = objects.InstanceNUMACell(
@@ -3833,11 +3900,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3864,11 +3938,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(1,))
@@ -3895,11 +3976,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fit(self):
@@ -3924,13 +4012,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3967,13 +4067,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
@@ -4000,13 +4112,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
@@ -4040,7 +4164,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([4, 5]), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_cpu_pinning_usage_from_instances(self):
@@ -4052,6 +4176,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4081,6 +4206,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 3]),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4110,6 +4236,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4138,6 +4265,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4164,6 +4292,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4190,6 +4319,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4216,6 +4346,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4245,6 +4376,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4275,6 +4407,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2, 6, 7]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4307,6 +4440,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
cpu_usage=2,
memory_usage=0,
pinned_cpus=set(),
+ socket=0,
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)]
@@ -4340,6 +4474,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
@@ -4382,6 +4517,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4421,6 +4557,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 5, 6, 7]),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4656,6 +4793,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1])],
mempages=[objects.NUMAPagesTopology(
@@ -4667,6 +4805,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4680,7 +4819,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4694,7 +4833,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4708,7 +4847,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4722,7 +4861,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1, 2, 4]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_multi_nodes_isolate(self):
@@ -4739,7 +4878,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2}, inst_topo.cells[1].cpu_pinning)
@@ -4759,7 +4898,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
# The guest NUMA node 0 is requesting 2pCPUs + 1 additional
# pCPU for emulator threads, the host can't handle the
# request.
@@ -4779,7 +4918,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1, 2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2, 2: 3}, inst_topo.cells[1].cpu_pinning)
@@ -4854,7 +4993,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0, 1: 2}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([4]), inst_topo.cells[0].cpuset_reserved)
@@ -4884,7 +5023,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4913,7 +5052,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
if policy:
inst_topo.emulator_threads_policy = policy
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
return inst_topo
def test_mixed_instance_not_define(self):
@@ -4970,7 +5109,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 3}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4999,7 +5138,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5254,7 +5393,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
expected_error = (
"Memory encryption requested by %(requesters)s but image "
"%(image_name)s doesn't have 'hw_firmware_type' property "
- "set to 'uefi'"
+ "set to 'uefi' or volume-backed instance was requested"
)
def _test_encrypted_memory_support_no_uefi(self, enc_extra_spec,
@@ -5381,6 +5520,25 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
(self.flavor_name, self.image_id)
)
+ def test_encrypted_memory_support_flavor_for_volume(self):
+ extra_specs = {'hw:mem_encryption': True}
+
+ flavor = objects.Flavor(name=self.flavor_name,
+ extra_specs=extra_specs)
+ # The following image_meta is typical for a root Cinder volume
+ image_meta = objects.ImageMeta.from_dict({
+ 'min_disk': 0,
+ 'min_ram': 0,
+ 'properties': {},
+ 'size': 0,
+ 'status': 'active'})
+ # Confirm that exception.FlavorImageConflict is raised when a
+ # flavor with the hw:mem_encryption flag is used to create a
+ # volume-backed instance
+ self.assertRaises(exception.FlavorImageConflict,
+ hw.get_mem_encryption_constraint, flavor,
+ image_meta)
+
class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
@@ -5452,6 +5610,56 @@ class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
image_meta.properties.hw_pci_numa_affinity_policy = "fake"
+class PMUEnabledTest(test.NoDBTestCase):
+
+ def test_pmu_image_and_flavor_conflict(self):
+ """Tests that calling _validate_flavor_image_nostatus()
+ with an image that conflicts with the flavor raises but no
+ exception is raised if there is no conflict.
+ """
+ flavor = objects.Flavor(
+ name='foo', vcpus=1, memory_mb=512, root_gb=1,
+ extra_specs={'hw:pmu': "true"})
+ image_meta = objects.ImageMeta.from_dict({
+ 'name': 'bar', 'properties': {'hw_pmu': False},
+ })
+ self.assertRaises(
+ exception.FlavorImageConflict,
+ hw.get_pmu_constraint,
+ flavor, image_meta)
+
+ def test_pmu_image_and_flavor_same_value(self):
+ # assert that if both the image and flavor are set to the same value
+ # no exception is raised and the requested value is returned.
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "true"})
+ image_meta = objects.ImageMeta.from_dict({
+ 'properties': {'hw_pmu': True},
+ })
+ self.assertTrue(hw.get_pmu_constraint(flavor, image_meta))
+
+ def test_pmu_image_only(self):
+ # assert that if only the image metadata is set then it is valid
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=1, extra_specs={})
+
+ # ensure string to bool conversion works for image metadata
+ # property by using "yes".
+ image_meta = objects.ImageMeta.from_dict({
+ 'properties': {'hw_pmu': 'yes'},
+ })
+ self.assertTrue(hw.get_pmu_constraint(flavor, image_meta))
+
+ def test_pmu_flavor_only(self):
+ # assert that if only the flavor extra_spec is set then it is valid
+ # and that the string-to-bool conversion of "on" works.
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "on"})
+
+ image_meta = objects.ImageMeta.from_dict({'properties': {}})
+ self.assertTrue(hw.get_pmu_constraint(flavor, image_meta))
+
+
@ddt.ddt
class VIFMultiqueueEnabledTest(test.NoDBTestCase):
@@ -5614,3 +5822,251 @@ class RescuePropertyTestCase(test.NoDBTestCase):
meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
meta.properties = objects.ImageMetaProps.from_dict(props)
self.assertEqual(expected, hw.check_hw_rescue_props(meta))
+
+
+class HostCellsSortingTestCase(test.NoDBTestCase):
+ # NOTE(IPO): It is possible to test all sorting cases with one defined
+ # host NUMA topology.
+ # We have 4 NUMA cells with the following properties:
+ # NUMA cell 0: has the most CPU usage
+ # NUMA cell 1: has the most PCI devices available
+ # NUMA cell 2: has the most free pCPUs
+ # NUMA cell 3: has the most available memory
+ # So it is enough to check the order of NUMA cells in the resulting
+ # instance topology to verify each particular sorting case.
+
+ def setUp(self):
+ super(HostCellsSortingTestCase, self).setUp()
+
+ def _create_pci_stats(node, count):
+ test_dict = copy.copy(fake_pci.fake_pool_dict)
+ test_dict['numa_node'] = node
+ test_dict['vendor_id'] = '8086'
+ test_dict['product_id'] = 'fake-prod0'
+ test_dict['count'] = count
+ return stats.PciDeviceStats(
+ objects.NUMATopology(),
+ [objects.PciDevicePool.from_dict(test_dict)])
+
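+ # NUMA cell 1 is given two available PCI devices, making it the
+ # most PCI-rich cell.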
+ self.pci_stats = _create_pci_stats(1, 2)
+
+ self.host = objects.NUMATopology(cells=[
+ objects.NUMACell(
+ id=0,
+ cpuset=set([1, 2, 3, 4]),
+ pcpuset=set([1, 2, 3, 4]),
+ memory=4096,
+ cpu_usage=3,
+ memory_usage=2048,
+ pinned_cpus=set([1, 2]),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([1]), set([2]), set([3]), set([4])]),
+ objects.NUMACell(
+ id=1,
+ cpuset=set([5, 6, 7, 8]),
+ pcpuset=set([5, 6, 7, 8]),
+ memory=4096,
+ cpu_usage=2,
+ memory_usage=2048,
+ pinned_cpus=set([5, 6]),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([5]), set([6]), set([7]), set([8])]),
+ objects.NUMACell(
+ id=2,
+ cpuset=set([9, 10, 11, 12]),
+ pcpuset=set([9, 10, 11, 12]),
+ memory=4096,
+ cpu_usage=2,
+ memory_usage=2048,
+ pinned_cpus=set(),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([9]), set([10]), set([11]), set([12])]),
+ objects.NUMACell(
+ id=3,
+ cpuset=set([13, 14, 15, 16]),
+ pcpuset=set([13, 14, 15, 16]),
+ memory=4096,
+ cpu_usage=2,
+ memory_usage=1024,
+ pinned_cpus=set([13, 14]),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([13]), set([14]), set([15]), set([16])])
+ ])
+
+ self.instance0 = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([0]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=1, cpuset=set([1]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=2, cpuset=set([2]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=3, cpuset=set([3]), pcpuset=set(), memory=2048)
+ ])
+
+ self.instance1 = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([0]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=1, cpuset=set([1]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=2, cpuset=set([2]), pcpuset=set(), memory=2048),
+ ])
+
+ self.instance2 = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED
+ ),
+ objects.InstanceNUMACell(
+ id=2,
+ cpuset=set(),
+ pcpuset=set([2]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED
+ )])
+
+ def assertInstanceNUMAcellOrder(self, list_to_check, instance_topo):
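+ # After fitting, each instance cell's id is the id of the host
+ # cell it was placed on, so the order reflects the sorting result.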
+ for cell, cell_id in zip(instance_topo.cells, list_to_check):
+ self.assertEqual(cell.id, cell_id)
+
+ def test_sort_host_numa_cell_num_equal_instance_cell_num(self):
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance0, {})
+ self.assertInstanceNUMAcellOrder([0, 1, 2, 3], instance_topology)
+
+ def test_sort_no_pci_stats_no_shared_cpu_policy(self):
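+ # With packing enabled the most-used host cells are preferred;
+ # with it disabled the least-used cells come first.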
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2, {})
+ self.assertInstanceNUMAcellOrder([0, 1, 3], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2, {})
+ self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
+
+ def test_sort_no_pci_stats_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1, {})
+ self.assertInstanceNUMAcellOrder([0, 1, 2], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1, {})
+ self.assertInstanceNUMAcellOrder([3, 1, 2], instance_topology)
+
+ def test_sort_pci_stats_pci_req_no_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group='compute')
+ pci_request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086', 'product_id': 'fake-prod0'}])
+ pci_reqs = [pci_request]
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_requests=pci_reqs,
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 0, 3], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_requests=pci_reqs,
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 2, 3], instance_topology)
+
+ def test_sort_pci_stats_pci_req_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group='compute')
+ pci_request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086', 'product_id': 'fake-prod0'}])
+ pci_reqs = [pci_request]
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_requests=pci_reqs,
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 0, 2], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_requests=pci_reqs,
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 3, 2], instance_topology)
+
+ def test_sort_pci_stats_no_pci_req_no_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([0, 3, 2], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
+
+ def test_sort_pci_stats_no_pci_req_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([0, 2, 3], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group='compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_stats=self.pci_stats)
+ self.assertInstanceNUMAcellOrder([3, 2, 0], instance_topology)
diff --git a/nova/tests/unit/virt/test_imagecache.py b/nova/tests/unit/virt/test_imagecache.py
index ca4389fa14..b97e520074 100644
--- a/nova/tests/unit/virt/test_imagecache.py
+++ b/nova/tests/unit/virt/test_imagecache.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
@@ -64,7 +65,7 @@ ephemeral_bdm = [block_device.BlockDeviceDict(
class ImageCacheManagerTests(test.NoDBTestCase):
- def test_configurationi_defaults(self):
+ def test_configuration_defaults(self):
self.assertEqual(2400, CONF.image_cache.manager_interval)
self.assertEqual('_base', CONF.image_cache.subdirectory_name)
self.assertTrue(CONF.image_cache.remove_unused_base_images)
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 085b169db3..62a61c1e8b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -13,9 +13,11 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
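+ # With the default vmdk_allowed_types, a monolithicFlat image
+ # should be rejected during fetch.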
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
diff --git a/nova/tests/unit/virt/test_netutils.py b/nova/tests/unit/virt/test_netutils.py
index de3f451351..fa0e16df19 100644
--- a/nova/tests/unit/virt/test_netutils.py
+++ b/nova/tests/unit/virt/test_netutils.py
@@ -17,6 +17,17 @@ from nova.virt import netutils
class TestNetUtilsTestCase(test.NoDBTestCase):
+
+ def _get_fake_instance_nw_info(self, num_networks, dhcp_server, mtu):
+ network_info = fake_network.fake_get_instance_nw_info(self,
+ num_networks)
+ for vif in network_info:
+ for subnet in vif['network']['subnets']:
+ subnet['meta']['dhcp_server'] = dhcp_server
+ vif['network']['meta']['mtu'] = mtu
+
+ return network_info
+
def test_get_cached_vifs_with_vlan_no_nw_info(self):
# Make sure that an empty dictionary will be returned when
# nw_info is None
@@ -39,3 +50,15 @@ class TestNetUtilsTestCase(test.NoDBTestCase):
expected = {'fa:16:3e:d1:28:e4': '2145'}
self.assertEqual(expected,
netutils.get_cached_vifs_with_vlan(network_info))
+
+ def test__get_link_mtu(self):
+ network_info_dhcp = self._get_fake_instance_nw_info(
+ 1, '192.168.0.100', 9000)
+ network_info_no_dhcp = self._get_fake_instance_nw_info(
+ 1, None, 9000)
+
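+ # When the subnet has a DHCP server the MTU is expected to come
+ # via DHCP, so no link MTU is set.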
+ for vif in network_info_dhcp:
+ self.assertIsNone(netutils._get_link_mtu(vif))
+
+ for vif in network_info_no_dhcp:
+ self.assertEqual(9000, netutils._get_link_mtu(vif))
diff --git a/nova/tests/unit/virt/test_node.py b/nova/tests/unit/virt/test_node.py
new file mode 100644
index 0000000000..668b762520
--- /dev/null
+++ b/nova/tests/unit/virt/test_node.py
@@ -0,0 +1,142 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils.fixture import uuidsentinel as uuids
+import testtools
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.virt import node
+
+CONF = cfg.CONF
+
+
+# NOTE(danms): We do not inherit from test.TestCase because we need
+# the node methods to remain unstubbed in order to exercise them.
+class TestNodeIdentity(testtools.TestCase):
+ def flags(self, **kw):
+ """Override flag variables for a test."""
+ group = kw.pop('group', None)
+ for k, v in kw.items():
+ CONF.set_override(k, v, group)
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.identity_file = os.path.join(self.tempdir, node.COMPUTE_ID_FILE)
+ self.fake_config_files = ['%s/etc/nova.conf' % self.tempdir,
+ '%s/etc/nova/nova.conf' % self.tempdir,
+ '%s/opt/etc/nova/nova.conf' % self.tempdir]
+ for fn in self.fake_config_files:
+ os.makedirs(os.path.dirname(fn))
+ self.flags(state_path=self.tempdir,
+ config_file=self.fake_config_files)
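+ # Reset the module-level cache so each test exercises the
+ # read/generate path afresh.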
+ node.LOCAL_NODE_UUID = None
+
+ def test_generate_local_node_uuid(self):
+ node_uuid = uuids.node
+ node.write_local_node_uuid(node_uuid)
+
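+ # A second write must fail because the identity file now exists.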
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'anything')
+ self.assertIn(
+ 'Identity file %s appeared unexpectedly' % self.identity_file,
+ str(e))
+
+ def test_generate_local_node_uuid_unexpected_open_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_open.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_generate_local_node_uuid_unexpected_write_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_write = mock_open.return_value.__enter__.return_value.write
+ mock_write.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_get_local_node_uuid_simple_exists(self):
+ node_uuid = uuids.node
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_exists_whitespace(self):
+ node_uuid = uuids.node
+ # Make sure we strip whitespace from the file contents
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ ' %s \n' % node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_generate(self):
+ self.assertIsNone(node.LOCAL_NODE_UUID)
+ node_uuid1 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid1, node.LOCAL_NODE_UUID)
+ node_uuid2 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid2, node.LOCAL_NODE_UUID)
+
+ # Make sure we got the same thing each time, and that it's a
+ # valid uuid. Since we provided no uuid, it must have been
+ # generated the first time and read/returned the second.
+ self.assertEqual(node_uuid1, node_uuid2)
+ uuid.UUID(node_uuid1)
+
+ # Try to read it directly to make sure the file was really
+ # created and with the right value.
+ self.assertEqual(node_uuid1, node.read_local_node_uuid())
+
+ def test_get_local_node_uuid_two(self):
+ node_uuid = uuids.node
+
+ # Write the uuid to two of our locations
+ for cf in (self.fake_config_files[0], self.fake_config_files[1]):
+ open(os.path.join(os.path.dirname(cf),
+ node.COMPUTE_ID_FILE), 'w').write(node_uuid)
+
+ # Make sure we got the expected uuid and that no exceptions
+ # were raised about the files disagreeing
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_two_mismatch(self):
+ node_uuids = [uuids.node1, uuids.node2]
+
+ # Write a different uuid to each file
+ for id, fn in zip(node_uuids, self.fake_config_files):
+ open(os.path.join(
+ os.path.dirname(fn),
+ node.COMPUTE_ID_FILE), 'w').write(id)
+
+ # Make sure we get an error that identifies the mismatching
+ # file with its uuid, as well as what we expected to find
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.get_local_node_uuid)
+ expected = ('UUID %s in %s does not match %s' % (
+ node_uuids[1],
+ os.path.join(os.path.dirname(self.fake_config_files[1]),
+ 'compute_id'),
+ node_uuids[0]))
+ self.assertIn(expected, str(e))
diff --git a/nova/tests/unit/virt/test_osinfo.py b/nova/tests/unit/virt/test_osinfo.py
index af3698b541..5d927deab1 100644
--- a/nova/tests/unit/virt/test_osinfo.py
+++ b/nova/tests/unit/virt/test_osinfo.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import exception
from nova import objects
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 11f306c761..2d108c6f2d 100644
--- a/nova/tests/unit/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
@@ -14,8 +14,8 @@
# under the License.
import io
+from unittest import mock
-import mock
import os_traits
from nova import test
@@ -102,6 +102,33 @@ class TestVirtDriver(test.NoDBTestCase):
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_RAW])
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_VHD])
+ def test_block_device_info_get_encrypted_disks(self):
+ block_device_info = {
+ 'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
+ 'image': [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ ],
+ 'ephemerals': [
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ {'device_name': '/dev/vdc', 'encrypted': False},
+ ],
+ }
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ ]
+ self.assertEqual(expected, disks)
+ # Try removing 'image'
+ block_device_info.pop('image')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [{'device_name': '/dev/vdb', 'encrypted': True}]
+ self.assertEqual(expected, disks)
+ # Remove 'ephemerals'
+ block_device_info.pop('ephemerals')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ self.assertEqual([], disks)
+
class FakeMount(object):
def __init__(self, image, mount_dir, partition=None, device=None):
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 8dcad485bc..ed9f1e3822 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -15,9 +15,9 @@
from collections import deque
import sys
import traceback
+from unittest import mock
import fixtures
-import mock
import netaddr
import os_resource_classes as orc
import os_vif
@@ -168,7 +168,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
self.fail("Couldn't load driver %s - %s" % (cls, e))
self.assertEqual(cm.driver.__class__.__name__, driver,
- "Could't load driver %s" % cls)
+ "Couldn't load driver %s" % cls)
@mock.patch.object(sys, 'exit', side_effect=test.TestingException())
def test_fail_to_load_new_drivers(self, mock_exit):
@@ -746,13 +746,13 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(cpu_allocation_ratio=16.1)
self.flags(ram_allocation_ratio=1.6)
self.flags(disk_allocation_ratio=1.1)
- expeced_ratios = {
+ expected_ratios = {
orc.VCPU: CONF.cpu_allocation_ratio,
orc.MEMORY_MB: CONF.ram_allocation_ratio,
orc.DISK_GB: CONF.disk_allocation_ratio
}
# If conf is set, return conf
- self.assertEqual(expeced_ratios,
+ self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
self.flags(cpu_allocation_ratio=None)
@@ -761,25 +761,25 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(initial_cpu_allocation_ratio=15.9)
self.flags(initial_ram_allocation_ratio=1.4)
self.flags(initial_disk_allocation_ratio=0.9)
- expeced_ratios = {
+ expected_ratios = {
orc.VCPU: CONF.initial_cpu_allocation_ratio,
orc.MEMORY_MB: CONF.initial_ram_allocation_ratio,
orc.DISK_GB: CONF.initial_disk_allocation_ratio
}
# if conf is unset and inv doesn't exist, return init conf
- self.assertEqual(expeced_ratios,
+ self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
inv = {orc.VCPU: {'allocation_ratio': 3.0},
orc.MEMORY_MB: {'allocation_ratio': 3.1},
orc.DISK_GB: {'allocation_ratio': 3.2}}
- expeced_ratios = {
+ expected_ratios = {
orc.VCPU: inv[orc.VCPU]['allocation_ratio'],
orc.MEMORY_MB: inv[orc.MEMORY_MB]['allocation_ratio'],
orc.DISK_GB: inv[orc.DISK_GB]['allocation_ratio']
}
# if conf is unset and inv exists, return inv
- self.assertEqual(expeced_ratios,
+ self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
@@ -832,6 +832,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
# When destroying an instance, os-vif will try to execute some commands
# which hang tests so let's just stub out the unplug call to os-vif
# since we don't care about it.
diff --git a/nova/tests/unit/virt/vmwareapi/__init__.py b/nova/tests/unit/virt/vmwareapi/__init__.py
index e69de29bb2..206b60cb8f 100644
--- a/nova/tests/unit/virt/vmwareapi/__init__.py
+++ b/nova/tests/unit/virt/vmwareapi/__init__.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+try:
+ import oslo_vmware # noqa: F401
+except ImportError:
+ raise unittest.SkipTest(
+ "The 'oslo.vmware' dependency is not installed."
+ )
diff --git a/nova/tests/unit/virt/vmwareapi/fake.py b/nova/tests/unit/virt/vmwareapi/fake.py
index b98a287613..2c09afb8ec 100644
--- a/nova/tests/unit/virt/vmwareapi/fake.py
+++ b/nova/tests/unit/virt/vmwareapi/fake.py
@@ -23,11 +23,11 @@ import collections
import sys
from oslo_log import log as logging
-from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
+from oslo_vmware import vim_util
from nova import exception
from nova.virt.vmwareapi import constants
@@ -76,23 +76,34 @@ def cleanup():
_db_content[c] = {}
-def _create_object(table, table_obj):
+def _create_object(table_obj):
"""Create an object in the db."""
- _db_content.setdefault(table, {})
- _db_content[table][table_obj.obj] = table_obj
+ _db_content.setdefault(table_obj.obj._type, {})
+ update_object(table_obj)
-def _get_object(obj_ref):
+def get_object(obj_ref):
"""Get object for the give reference."""
- return _db_content[obj_ref.type][obj_ref]
+ return _db_content[obj_ref.type][obj_ref.value]
-def _get_objects(obj_type):
+def get_objects(obj_type):
"""Get objects of the type."""
- lst_objs = FakeRetrieveResult()
- for key in _db_content[obj_type]:
- lst_objs.add_object(_db_content[obj_type][key])
- return lst_objs
+ return _db_content[obj_type].values()
+
+
+def get_first_object(obj_type):
+ """Get the first object of an object type"""
+ return next(iter(_db_content[obj_type].values()))
+
+
+def get_first_object_ref(obj_type):
+ """Get the first reference of an object type"""
+ return get_first_object(obj_type).obj
+
+
+def _no_objects_of_type(obj_type):
+ return not _db_content.get(obj_type)
def _convert_to_array_of_mor(mors):
@@ -135,21 +146,19 @@ class FakeRetrieveResult(object):
if token is not None:
self.token = token
- def add_object(self, object):
- self.objects.append(object)
+ def add_object(self, obj):
+ self.objects.append(obj)
-def _get_object_refs(obj_type):
- """Get object References of the type."""
- lst_objs = []
- for key in _db_content[obj_type]:
- lst_objs.append(key)
- return lst_objs
+def get_object_refs(obj_type):
+ """Get iterator over object References of the type."""
+ for obj in _db_content[obj_type].values():
+ yield obj.obj
-def _update_object(table, table_obj):
+def update_object(table_obj):
"""Update objects of the type."""
- _db_content[table][table_obj.obj] = table_obj
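+ # Key by the moref's string value so distinct but equivalent refs match.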
+ _db_content[table_obj.obj._type][table_obj.obj.value] = table_obj
class Prop(object):
@@ -177,6 +186,14 @@ class ManagedObjectReference(object):
self.type = name
self._type = name
+ def __repr__(self):
+ return f'{self._type}:{self.value}'
+
+ def __eq__(self, other):
+ return (other is not None and
+ vim_util.get_moref_value(other) == self.value and
+ vim_util.get_moref_type(other) == self.type)
+
class ObjectContent(object):
"""ObjectContent array holds dynamic properties."""
@@ -262,8 +279,11 @@ class ManagedObject(object):
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
- return jsonutils.dumps({elem.name: elem.val
- for elem in self.propSet})
+ # We can't just dump the managed-object, because it may be circular
+ return "{}:{}({})".format(self.obj._type, self.obj.value,
+ ", ".join(
+ "{}={}".format(p.name, p.val if p.name == "name" else "<>")
+ for p in self.propSet))
class DataObject(object):
@@ -593,8 +613,7 @@ class ResourcePool(ManagedObject):
class DatastoreHostMount(DataObject):
def __init__(self, value='host-100'):
super(DatastoreHostMount, self).__init__()
- host_ref = (_db_content["HostSystem"]
- [list(_db_content["HostSystem"].keys())[0]].obj)
+ host_ref = get_first_object_ref("HostSystem")
host_system = DataObject()
host_system.ManagedObjectReference = [host_ref]
host_system.value = value
@@ -621,9 +640,15 @@ class ClusterComputeResource(ManagedObject):
summary.effectiveCpu = 10000
self.set("summary", summary)
+ vm_list = DataObject()
+ vm_list.ManagedObjectReference = []
+ self.set("vm", vm_list)
+
def _add_root_resource_pool(self, r_pool):
if r_pool:
self.set("resourcePool", r_pool)
+ pool = get_object(r_pool)
+ self.set("vm", pool.get("vm"))
def _add_host(self, host_sys):
if host_sys:
@@ -659,7 +684,7 @@ class ClusterComputeResource(ManagedObject):
# Compute the aggregate stats
summary.numHosts = len(hosts.ManagedObjectReference)
for host_ref in hosts.ManagedObjectReference:
- host_sys = _get_object(host_ref)
+ host_sys = get_object(host_ref)
connected = host_sys.get("connected")
host_summary = host_sys.get("summary")
summary.numCpuCores += host_summary.hardware.numCpuCores
@@ -717,14 +742,17 @@ class HostSystem(ManagedObject):
maintenance_mode=False):
super(HostSystem, self).__init__("host")
self.set("name", name)
- if _db_content.get("HostNetworkSystem", None) is None:
+ if _no_objects_of_type("HostNetworkSystem"):
create_host_network_system()
- if not _get_object_refs('HostStorageSystem'):
+
+ if _no_objects_of_type("HostStorageSystem"):
create_host_storage_system()
- host_net_key = list(_db_content["HostNetworkSystem"].keys())[0]
- host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
- self.set("configManager.networkSystem", host_net_sys)
- host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
+
+ host_net_obj = get_first_object("HostNetworkSystem")
+ host_net_ref = host_net_obj.obj
+ self.set("configManager.networkSystem", host_net_ref)
+
+ host_storage_sys_key = get_first_object_ref('HostStorageSystem')
self.set("configManager.storageSystem", host_storage_sys_key)
if not ds_ref:
@@ -779,10 +807,9 @@ class HostSystem(ManagedObject):
self.set("config.network.pnic", net_info_pnic)
self.set("connected", connected)
- if _db_content.get("Network", None) is None:
+ if _no_objects_of_type("Network"):
create_network()
- net_ref = _db_content["Network"][
- list(_db_content["Network"].keys())[0]].obj
+ net_ref = get_first_object_ref("Network")
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
@@ -792,9 +819,9 @@ class HostSystem(ManagedObject):
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
- net_swicth = DataObject()
- net_swicth.HostVirtualSwitch = [vswitch_do]
- self.set("config.network.vswitch", net_swicth)
+ net_switch = DataObject()
+ net_switch.HostVirtualSwitch = [vswitch_do]
+ self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
@@ -821,7 +848,7 @@ class HostSystem(ManagedObject):
self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
# Set the same on the storage system managed object
- host_storage_sys = _get_object(host_storage_sys_key)
+ host_storage_sys = get_object(host_storage_sys_key)
host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
host_bus_adapter_array)
@@ -882,17 +909,15 @@ class Datacenter(ManagedObject):
def __init__(self, name="ha-datacenter", ds_ref=None):
super(Datacenter, self).__init__("dc")
self.set("name", name)
- if _db_content.get("Folder", None) is None:
+ if _no_objects_of_type("Folder"):
create_folder()
- folder_ref = _db_content["Folder"][
- list(_db_content["Folder"].keys())[0]].obj
+ folder_ref = get_first_object_ref("Folder")
folder_do = DataObject()
folder_do.ManagedObjectReference = [folder_ref]
self.set("vmFolder", folder_ref)
- if _db_content.get("Network", None) is None:
+ if _no_objects_of_type("Network"):
create_network()
- net_ref = _db_content["Network"][
- list(_db_content["Network"].keys())[0]].obj
+ net_ref = get_first_object_ref("Network")
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
@@ -927,54 +952,56 @@ class Task(ManagedObject):
def create_host_network_system():
host_net_system = HostNetworkSystem()
- _create_object("HostNetworkSystem", host_net_system)
+ _create_object(host_net_system)
def create_host_storage_system():
host_storage_system = HostStorageSystem()
- _create_object("HostStorageSystem", host_storage_system)
+ _create_object(host_storage_system)
def create_host(ds_ref=None):
host_system = HostSystem(ds_ref=ds_ref)
- _create_object('HostSystem', host_system)
+ _create_object(host_system)
def create_datacenter(name, ds_ref=None):
data_center = Datacenter(name, ds_ref)
- _create_object('Datacenter', data_center)
+ _create_object(data_center)
def create_datastore(name, capacity, free):
data_store = Datastore(name, capacity, free)
- _create_object('Datastore', data_store)
+ _create_object(data_store)
return data_store.obj
def create_res_pool():
res_pool = ResourcePool()
- _create_object('ResourcePool', res_pool)
+ _create_object(res_pool)
return res_pool.obj
def create_folder():
folder = Folder()
- _create_object('Folder', folder)
+ _create_object(folder)
return folder.obj
def create_network():
network = Network()
- _create_object('Network', network)
+ _create_object(network)
def create_cluster(name, ds_ref):
cluster = ClusterComputeResource(name=name)
- cluster._add_host(_get_object_refs("HostSystem")[0])
- cluster._add_host(_get_object_refs("HostSystem")[1])
+ for i, host in enumerate(get_object_refs("HostSystem")):
+ cluster._add_host(host)
+ if i >= 1:
+ break
cluster._add_datastore(ds_ref)
cluster._add_root_resource_pool(create_res_pool())
- _create_object('ClusterComputeResource', cluster)
+ _create_object(cluster)
return cluster
@@ -993,16 +1020,15 @@ def create_vm(uuid=None, name=None,
devices = []
if vmPathName is None:
- vm_path = ds_obj.DatastorePath(
- list(_db_content['Datastore'].values())[0])
+ vm_path = ds_obj.DatastorePath(get_first_object("Datastore"))
else:
vm_path = ds_obj.DatastorePath.parse(vmPathName)
if res_pool_ref is None:
- res_pool_ref = list(_db_content['ResourcePool'].keys())[0]
+ res_pool_ref = get_first_object_ref("ResourcePool")
if host_ref is None:
- host_ref = list(_db_content["HostSystem"].keys())[0]
+ host_ref = get_first_object_ref("HostSystem")
# Fill in the default path to the vmx file if we were only given a
# datastore. Note that if you create a VM with vmPathName '[foo]', when you
@@ -1011,9 +1037,9 @@ def create_vm(uuid=None, name=None,
if vm_path.rel_path == '':
vm_path = vm_path.join(name, name + '.vmx')
- for key, value in _db_content["Datastore"].items():
+ for value in get_objects("Datastore"):
if value.get('summary.name') == vm_path.datastore:
- ds = key
+ ds = value.obj
break
else:
ds = create_datastore(vm_path.datastore, 1024, 500)
@@ -1030,9 +1056,9 @@ def create_vm(uuid=None, name=None,
"instanceUuid": uuid,
"version": version}
vm = VirtualMachine(**vm_dict)
- _create_object("VirtualMachine", vm)
+ _create_object(vm)
- res_pool = _get_object(res_pool_ref)
+ res_pool = get_object(res_pool_ref)
res_pool.vm.ManagedObjectReference.append(vm.obj)
return vm.obj
@@ -1040,7 +1066,7 @@ def create_vm(uuid=None, name=None,
def create_task(task_name, state="running", result=None, error_fault=None):
task = Task(task_name, state, result, error_fault)
- _create_object("Task", task)
+ _create_object(task)
return task
@@ -1103,12 +1129,14 @@ def fake_fetch_image(context, instance, host, port, dc_name, ds_name,
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
- if _db_content.get("VirtualMachine", None) is None:
+ vms = _db_content.get("VirtualMachine")
+ if not vms:
raise exception.NotFound("There is no VM registered")
- if vm_ref not in _db_content.get("VirtualMachine"):
+ try:
+ return vms[vm_ref.value]
+ except KeyError:
raise exception.NotFound("Virtual Machine with ref %s is not "
- "there" % vm_ref)
- return _db_content.get("VirtualMachine")[vm_ref]
+ "there" % vm_ref.value)
def _merge_extraconfig(existing, changes):
@@ -1354,11 +1382,10 @@ class FakeVim(object):
def _find_all_by_uuid(self, *args, **kwargs):
uuid = kwargs.get('uuid')
vm_refs = []
- for vm_ref in _db_content.get("VirtualMachine"):
- vm = _get_object(vm_ref)
+ for vm in get_objects("VirtualMachine"):
vm_uuid = vm.get("summary.config.instanceUuid")
if vm_uuid == uuid:
- vm_refs.append(vm_ref)
+ vm_refs.append(vm.obj)
return vm_refs
def _delete_snapshot(self, method, *args, **kwargs):
@@ -1412,7 +1439,7 @@ class FakeVim(object):
vm_dict["extra_config"] = extraConfigs
virtual_machine = VirtualMachine(**vm_dict)
- _create_object("VirtualMachine", virtual_machine)
+ _create_object(virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
@@ -1420,7 +1447,7 @@ class FakeVim(object):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
- del _db_content["VirtualMachine"][vm_ref]
+ del _db_content["VirtualMachine"][vm_ref.value]
task_mdo = create_task(method, "success")
return task_mdo.obj
@@ -1491,13 +1518,7 @@ class FakeVim(object):
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
- if _db_content.get("VirtualMachine", None) is None:
- raise exception.NotFound("No Virtual Machine has been "
- "registered yet")
- if vm_ref not in _db_content.get("VirtualMachine"):
- raise exception.NotFound("Virtual Machine with ref %s is not "
- "there" % vm_ref)
- vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
+ vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
@@ -1526,7 +1547,7 @@ class FakeVim(object):
# This means that we are retrieving props for all managed
# data objects of the specified 'type' in the entire
# inventory. This gets invoked by vim_util.get_objects.
- mdo_refs = _db_content[spec_type]
+ mdo_refs = list(get_object_refs(spec_type))
elif obj_ref.type != spec_type:
# This means that we are retrieving props for the managed
# data objects in the parent object's 'path' property.
@@ -1536,7 +1557,7 @@ class FakeVim(object):
# path = 'datastore'
# the above will retrieve all datastores in the given
# cluster.
- parent_mdo = _db_content[obj_ref.type][obj_ref]
+ parent_mdo = get_object(obj_ref)
path = obj.selectSet[0].path
mdo_refs = parent_mdo.get(path).ManagedObjectReference
else:
@@ -1545,12 +1566,13 @@ class FakeVim(object):
# vim_util.get_properties_for_a_collection_of_objects.
mdo_refs = [obj_ref]
+ mdo_list = _db_content[spec_type]
for mdo_ref in mdo_refs:
- mdo = _db_content[spec_type][mdo_ref]
- prop_list = []
- for prop_name in properties:
- prop = Prop(prop_name, mdo.get(prop_name))
- prop_list.append(prop)
+ mdo = mdo_list[mdo_ref.value]
+ prop_list = [
+ Prop(prop_name, mdo.get(prop_name))
+ for prop_name in properties
+ ]
obj_content = ObjectContent(mdo.obj, prop_list)
lst_ret_objs.add_object(obj_content)
except Exception:
@@ -1560,14 +1582,13 @@ class FakeVim(object):
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
- _host_sk = list(_db_content["HostSystem"].keys())[0]
- host_mdo = _db_content["HostSystem"][_host_sk]
+ host_mdo = get_first_object("HostSystem")
host_mdo._add_port_group(kwargs.get("portgrp"))
def _add_iscsi_send_tgt(self, method, *args, **kwargs):
"""Adds a iscsi send target to the hba."""
send_targets = kwargs.get('targets')
- host_storage_sys = _get_objects('HostStorageSystem').objects[0]
+ host_storage_sys = get_first_object('HostStorageSystem')
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
diff --git a/nova/tests/unit/virt/vmwareapi/stubs.py b/nova/tests/unit/virt/vmwareapi/stubs.py
index d0caaae43e..a0406bdac5 100644
--- a/nova/tests/unit/virt/vmwareapi/stubs.py
+++ b/nova/tests/unit/virt/vmwareapi/stubs.py
@@ -36,7 +36,7 @@ def fake_vim_prop(arg):
return fake.get_fake_vim_object(arg)
-def fake_is_vim_object(arg, module):
+def fake_is_vim_object(module):
"""Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
@@ -74,9 +74,10 @@ def set_stubs(test):
fake.fake_upload_image)
test.stub_out('nova.virt.vmwareapi.images.fetch_image',
fake.fake_fetch_image)
- test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
+ test.stub_out('nova.virt.vmwareapi.session.VMwareAPISession.vim',
fake_vim_prop)
- test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession._is_vim_object',
+ test.stub_out('nova.virt.vmwareapi.session.VMwareAPISession.'
+ '_is_vim_object',
fake_is_vim_object)
test.stub_out('nova.network.neutron.API.update_instance_vnic_index',
lambda *args, **kwargs: None)
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
index de07444ddb..7e8b1c1b63 100644
--- a/nova/tests/unit/virt/vmwareapi/test_configdrive.py
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel
from nova import context
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
index 5889fb8239..ac473c8c09 100644
--- a/nova/tests/unit/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -21,9 +21,9 @@ Test suite for VMwareAPI.
import collections
import datetime
+from unittest import mock
from eventlet import greenthread
-import mock
import os_resource_classes as orc
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
@@ -61,6 +61,7 @@ from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -109,20 +110,11 @@ DEFAULT_FLAVOR_OBJS = [
]
-def _fake_create_session(inst):
- session = vmwareapi_fake.DataObject()
- session.key = 'fake_key'
- session.userName = 'fake_username'
- session._pbm_wsdl_loc = None
- session._pbm = None
- inst._session = session
-
-
class VMwareDriverStartupTestCase(test.NoDBTestCase):
def _start_driver_with_flags(self, expected_exception_type, startup_flags):
self.flags(**startup_flags)
with mock.patch(
- 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
+ 'nova.virt.vmwareapi.session.VMwareAPISession.__init__'):
e = self.assertRaises(Exception, driver.VMwareVCDriver, None) # noqa
self.assertIs(type(e), expected_exception_type)
@@ -154,36 +146,6 @@ class VMwareDriverStartupTestCase(test.NoDBTestCase):
group='vmware'))
-class VMwareSessionTestCase(test.NoDBTestCase):
-
- @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
- return_value=False)
- def test_call_method(self, mock_is_vim):
- with test.nested(
- mock.patch.object(driver.VMwareAPISession, '_create_session',
- _fake_create_session),
- mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
- ) as (fake_create, fake_invoke):
- session = driver.VMwareAPISession()
- session._vim = mock.Mock()
- module = mock.Mock()
- session._call_method(module, 'fira')
- fake_invoke.assert_called_once_with(module, 'fira', session._vim)
-
- @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
- return_value=True)
- def test_call_method_vim(self, mock_is_vim):
- with test.nested(
- mock.patch.object(driver.VMwareAPISession, '_create_session',
- _fake_create_session),
- mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
- ) as (fake_create, fake_invoke):
- session = driver.VMwareAPISession()
- module = mock.Mock()
- session._call_method(module, 'fira')
- fake_invoke.assert_called_once_with(module, 'fira')
-
-
class VMwareAPIVMTestCase(test.NoDBTestCase,
test_diagnostics.DiagnosticsComparisonMixin):
"""Unit tests for Vmware API connection calls."""
@@ -337,7 +299,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
_fake_check_session)
with mock.patch.object(greenthread, 'sleep'):
- self.conn = driver.VMwareAPISession()
+ self.conn = session.VMwareAPISession()
self.assertEqual(2, self.attempts)
def _get_flavor_by_name(self, type):
@@ -411,8 +373,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def _get_vm_record(self):
# Get record for VM
- vms = vmwareapi_fake._get_objects("VirtualMachine")
- for vm in vms.objects:
+ vms = vmwareapi_fake.get_objects("VirtualMachine")
+ for vm in vms:
if vm.get('name') == vm_util._get_vm_name(self._display_name,
self.uuid):
return vm
@@ -1307,7 +1269,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
self._create_vm()
- fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
+ fake_vm = vmwareapi_fake.get_first_object_ref("VirtualMachine")
snapshot_ref = vmwareapi_fake.ManagedObjectReference(
value="Snapshot-123",
name="VirtualMachineSnapshot")
@@ -1801,8 +1763,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
get_vm_ref.assert_called_once_with(self.conn._session,
self.instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(mock.sentinel.vm_ref,
self.instance, adapter_type, disk_type, vmdk_path='fake-path')
@@ -1878,8 +1839,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def test_iscsi_rescan_hba(self):
fake_target_portal = 'fake_target_host:port'
- host_storage_sys = vmwareapi_fake._get_objects(
- "HostStorageSystem").objects[0]
+ host_storage_sys = vmwareapi_fake.get_first_object(
+ "HostStorageSystem")
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
@@ -1899,7 +1860,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def test_iscsi_get_target(self):
data = {'target_portal': 'fake_target_host:port',
'target_iqn': 'fake_target_iqn'}
- host = vmwareapi_fake._get_objects('HostSystem').objects[0]
+ host = vmwareapi_fake.get_first_object('HostSystem')
host._add_iscsi_target(data)
vops = volumeops.VMwareVolumeOps(self.conn._session)
result = vops._iscsi_get_target(data)
@@ -2162,7 +2123,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 16,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
},
orc.MEMORY_MB: {
'total': 2048,
@@ -2170,7 +2131,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
},
orc.DISK_GB: {
'total': 95,
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
index 3b909642fb..1716027afb 100644
--- a/nova/tests/unit/virt/vmwareapi/test_ds_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -14,8 +14,8 @@
from contextlib import contextmanager
import re
+from unittest import mock
-import mock
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
diff --git a/nova/tests/unit/virt/vmwareapi/test_imagecache.py b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
index 485b1ea4cd..1116804d2f 100644
--- a/nova/tests/unit/virt/vmwareapi/test_imagecache.py
+++ b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
index 7cfec00c97..20abc063a0 100644
--- a/nova/tests/unit/virt/vmwareapi/test_images.py
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -17,8 +17,8 @@ Test suite for images.
import os
import tarfile
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from oslo_vmware import rw_handles
@@ -117,13 +117,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -172,7 +170,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_write_handle)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
@mock.patch('oslo_vmware.rw_handles.ImageReadHandle')
@@ -188,13 +186,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -220,7 +216,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_image_transfer.assert_called_once_with(mock_read_handle,
mock_write_handle)
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
diff --git a/nova/tests/unit/virt/vmwareapi/test_network_util.py b/nova/tests/unit/virt/vmwareapi/test_network_util.py
index 10f2583946..b3b5bb15ea 100644
--- a/nova/tests/unit/virt/vmwareapi/test_network_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_network_util.py
@@ -15,15 +15,15 @@
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_vmware import vim_util
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
-from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import network_util
+from nova.virt.vmwareapi import session
ResultSet = collections.namedtuple('ResultSet', ['objects'])
@@ -36,12 +36,12 @@ class GetNetworkWithTheNameTestCase(test.NoDBTestCase):
def setUp(self):
super(GetNetworkWithTheNameTestCase, self).setUp()
fake.reset()
- self.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
+ self.stub_out('nova.virt.vmwareapi.session.VMwareAPISession.vim',
stubs.fake_vim_prop)
- self.stub_out('nova.virt.vmwareapi.driver.'
+ self.stub_out('nova.virt.vmwareapi.session.'
'VMwareAPISession.is_vim_object',
stubs.fake_is_vim_object)
- self._session = driver.VMwareAPISession()
+ self._session = session.VMwareAPISession()
def _build_cluster_networks(self, networks):
"""Returns a set of results for a cluster network lookup.
diff --git a/nova/tests/unit/virt/vmwareapi/test_session.py b/nova/tests/unit/virt/vmwareapi/test_session.py
new file mode 100644
index 0000000000..6088e1f5b2
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_session.py
@@ -0,0 +1,208 @@
+# Copyright (c) 2022 SAP SE
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+Test suite for VMwareAPI Session
+"""
+
+from unittest import mock
+
+from oslo_vmware import exceptions as vexec
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.virt.vmwareapi import session
+
+
+def _fake_create_session(inst):
+ _session = vmwareapi_fake.DataObject()
+ _session.key = 'fake_key'
+ _session.userName = 'fake_username'
+ _session._pbm_wsdl_loc = None
+ _session._pbm = None
+ inst._session = _session
+
+
+def _fake_fetch_moref_impl(inst, _):
+ inst.moref = vmwareapi_fake.ManagedObjectReference(
+ value=mock.sentinel.moref2)
+
+
+class FakeStableMoRefProxy(session.StableMoRefProxy):
+ def __init__(self, ref=None):
+ super(FakeStableMoRefProxy, self).__init__(
+ ref or vmwareapi_fake.ManagedObjectReference(
+ value=mock.sentinel.moref))
+
+ def fetch_moref(self, session):
+ pass
+
+ def __repr__(self):
+ return "FakeStableMoRefProxy({!r})".format(self.moref)
+
+
+class StableMoRefProxyTestCase(test.NoDBTestCase):
+ def test_proxy(self):
+ ref = FakeStableMoRefProxy()
+ self.assertEqual(mock.sentinel.moref, ref.value)
+ self.assertEqual("ManagedObject", ref._type)
+
+ def test_proxy_classes(self):
+ # Necessary for suds serialisation
+ ref = FakeStableMoRefProxy()
+ self.assertEqual("ManagedObjectReference", ref.__class__.__name__)
+
+
+class VMwareSessionTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=False)
+ def test_call_method(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession,
+ '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession,
+ 'invoke_api'),
+ ) as (fake_create, fake_invoke):
+ _session = session.VMwareAPISession()
+ _session._vim = mock.Mock()
+ module = mock.Mock()
+ _session._call_method(module, 'fira')
+ fake_invoke.assert_called_once_with(module, 'fira', _session._vim)
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_vim(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession,
+ '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession,
+ 'invoke_api'),
+ ) as (fake_create, fake_invoke):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ _session._call_method(module, 'fira')
+ fake_invoke.assert_called_once_with(module, 'fira')
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_no_recovery(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy, 'fetch_moref'),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+
+ _session._call_method(
+ module, mock.sentinel.method_arg, ref, ref=ref)
+
+ fake_invoke.assert_called_once_with(
+ module, mock.sentinel.method_arg, ref, ref=ref)
+ fake_fetch_moref.assert_not_called()
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_arg_failed(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy, 'fetch_moref'),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException]
+
+ self.assertRaises(vexec.ManagedObjectNotFoundException,
+ _session._call_method, module, mock.sentinel.method_arg, ref)
+
+ fake_invoke.assert_called_once_with(
+ module, mock.sentinel.method_arg, ref)
+ fake_fetch_moref.assert_not_called()
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_kwarg_failed(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy, 'fetch_moref'),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException]
+
+ self.assertRaises(vexec.ManagedObjectNotFoundException,
+ _session._call_method, module,
+ mock.sentinel.method_arg, ref=ref)
+
+ fake_invoke.assert_called_once_with(
+ module, mock.sentinel.method_arg, ref=ref)
+ fake_fetch_moref.assert_not_called()
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_arg_success(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy,
+ 'fetch_moref', _fake_fetch_moref_impl),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException(
+ details=dict(obj=mock.sentinel.moref),
+ ), None]
+ _session._call_method(module, mock.sentinel.method_arg, ref)
+ fake_invoke.assert_called_with(
+ module, mock.sentinel.method_arg, ref)
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_kwarg_success(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy,
+ 'fetch_moref', _fake_fetch_moref_impl),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException(
+ details=dict(obj=mock.sentinel.moref),
+ ), None]
+ _session._call_method(module, mock.sentinel.method_arg, ref=ref)
+ fake_invoke.assert_called_with(
+ module, mock.sentinel.method_arg, ref=ref)
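The new test module above pins down the recovery behaviour of
session._call_method: a ManagedObjectNotFoundException naming the proxied
moref triggers a single fetch_moref refresh and a retry, while any other
failure propagates unchanged. A condensed sketch of that logic,
reconstructed from the assertions (not the actual nova implementation):

    from oslo_vmware import exceptions as vexec

    from nova.virt.vmwareapi import session as vmware_session

    def call_method_with_recovery(api_session, module, method,
                                  *args, **kwargs):
        # Locate a StableMoRefProxy passed positionally or as ref=...
        ref = kwargs.get('ref')
        if ref is None and args and isinstance(
                args[-1], vmware_session.StableMoRefProxy):
            ref = args[-1]
        try:
            return api_session.invoke_api(module, method, *args, **kwargs)
        except vexec.ManagedObjectNotFoundException as excep:
            details = getattr(excep, 'details', None) or {}
            # Only recover when the vanished object is the proxied one.
            if ref is None or details.get('obj') != ref.value:
                raise
            ref.fetch_moref(api_session)  # may rebind to a fresh moref
            return api_session.invoke_api(module, method, *args, **kwargs)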
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
index b0fb9df47c..02d516fac7 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vif.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_vmware import vim_util
from nova import exception
diff --git a/nova/tests/unit/virt/vmwareapi/test_vim_util.py b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
index ebfa2010ee..b3057a99ac 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vim_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
@@ -28,11 +28,11 @@ class VMwareVIMUtilTestCase(test.NoDBTestCase):
def test_get_inner_objects(self):
property = ['summary.name']
# Get the fake datastores directly from the cluster
- cluster_refs = fake._get_object_refs('ClusterComputeResource')
- cluster = fake._get_object(cluster_refs[0])
+ cluster = fake.get_first_object('ClusterComputeResource')
+ cluster_ref = cluster.obj
expected_ds = cluster.datastore.ManagedObjectReference
# Get the fake datastores using inner objects utility method
result = vim_util.get_inner_objects(
- self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
+ self.vim, cluster_ref, 'datastore', 'Datastore', property)
datastores = [oc.obj for oc in result.objects]
self.assertEqual(expected_ds, datastores)
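The fixture changes above replace private module helpers with public ones.
Assuming the new helpers keep the semantics the call sites suggest, typical
usage looks like:

    from nova.tests.unit.virt.vmwareapi import fake

    fake.reset()
    # get_first_object() returns the first fake object of a type; its
    # .obj attribute is the moref that _get_object_refs(...)[0] used to
    # return, and get_first_object_ref() yields that ref directly.
    cluster = fake.get_first_object('ClusterComputeResource')
    cluster_ref = cluster.obj
    host_ref = fake.get_first_object_ref('HostSystem')
    host = fake.get_object(host_ref)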
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
index ea30895a4d..82fa07a882 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vm_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -15,14 +15,15 @@
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_service import fixture as oslo_svc_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
+from oslo_vmware import vim_util as vutil
from nova import exception
from nova.network import model as network_model
@@ -31,7 +32,7 @@ from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import session as vmware_session
from nova.virt.vmwareapi import vm_util
@@ -375,7 +376,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
- fake._update_object("VirtualMachine", vm)
+ fake.update_object(vm)
# return the scsi type, not ide
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
vm_util.get_scsi_adapter_type(devices))
@@ -387,7 +388,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
- fake._update_object("VirtualMachine", vm)
+ fake.update_object(vm)
# the controller is not suitable since the number of devices already
# attached to it has reached SCSI_MAX_CONNECT_NUMBER
for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER):
@@ -1036,7 +1037,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
found[0] = True
mock_log_warn.side_effect = fake_log_warn
- session = driver.VMwareAPISession()
+ session = vmware_session.VMwareAPISession()
config_spec = vm_util.get_vm_create_spec(
session.vim.client.factory,
@@ -1987,23 +1988,85 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
mock_get_name.assert_called_once_with(self._instance.display_name,
self._instance.uuid)
-
-@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ def test_create_fcd_id_obj(self):
+ fcd_id_obj = mock.Mock()
+ client_factory = mock.Mock()
+ client_factory.create.return_value = fcd_id_obj
+ fcd_id = mock.sentinel.fcd_id
+ ret = vm_util._create_fcd_id_obj(client_factory, fcd_id)
+
+ self.assertEqual(fcd_id_obj, ret)
+ self.assertEqual(fcd_id, ret.id)
+ client_factory.create.assert_called_once_with('ns0:ID')
+
+ @mock.patch.object(vm_util, '_create_fcd_id_obj')
+ @mock.patch.object(vutil, 'get_moref')
+ def test_attach_fcd(self, get_moref, create_fcd_id_obj):
+ disk_id = mock.sentinel.disk_id
+ create_fcd_id_obj.return_value = disk_id
+
+ ds_ref = mock.sentinel.ds_ref
+ get_moref.return_value = ds_ref
+
+ task = mock.sentinel.task
+ session = mock.Mock()
+ session._call_method.return_value = task
+
+ vm_ref = mock.sentinel.vm_ref
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ controller_key = mock.sentinel.controller_key
+ unit_number = mock.sentinel.unit_number
+ vm_util.attach_fcd(
+ session, vm_ref, fcd_id, ds_ref_val, controller_key, unit_number)
+
+ create_fcd_id_obj.assert_called_once_with(
+ session.vim.client.factory, fcd_id)
+ get_moref.assert_called_once_with(ds_ref_val, 'Datastore')
+ session._call_method.assert_called_once_with(
+ session.vim, "AttachDisk_Task", vm_ref, diskId=disk_id,
+ datastore=ds_ref, controllerKey=controller_key,
+ unitNumber=unit_number)
+ session._wait_for_task.assert_called_once_with(task)
+
+ @mock.patch.object(vm_util, '_create_fcd_id_obj')
+ def test_detach_fcd(self, create_fcd_id_obj):
+ disk_id = mock.sentinel.disk_id
+ create_fcd_id_obj.return_value = disk_id
+
+ task = mock.sentinel.task
+ session = mock.Mock()
+ session._call_method.return_value = task
+
+ vm_ref = mock.sentinel.vm_ref
+ fcd_id = mock.sentinel.fcd_id
+ vm_util.detach_fcd(session, vm_ref, fcd_id)
+
+ create_fcd_id_obj.assert_called_once_with(
+ session.vim.client.factory, fcd_id)
+ session._call_method.assert_called_once_with(
+ session.vim, "DetachDisk_Task", vm_ref, diskId=disk_id)
+ session._wait_for_task.assert_called_once_with(task)
+
+
+@mock.patch.object(vmware_session.VMwareAPISession, 'vim',
+ stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
# N.B. Mocking on the class only mocks test_*(), but we need
- # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
- # setUp causes object initialisation to fail. Not mocking in tests results
- # in vim calls not using FakeVim.
- @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ # session.VMwareAPISession.vim to be mocked in both setUp and tests.
+ # Not mocking in setUp causes object initialisation to fail. Not
+ # mocking in tests results in vim calls not using FakeVim.
+ @mock.patch.object(vmware_session.VMwareAPISession, 'vim',
+ stubs.fake_vim_prop)
def setUp(self):
super(VMwareVMUtilGetHostRefTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
- self.session = driver.VMwareAPISession()
+ self.session = vmware_session.VMwareAPISession()
# Create a fake VirtualMachine running on a known host
- self.host_ref = list(fake._db_content['HostSystem'].keys())[0]
+ self.host_ref = fake.get_first_object_ref("HostSystem")
self.vm_ref = fake.create_vm(host_ref=self.host_ref)
@mock.patch.object(vm_util, 'get_vm_ref')
@@ -2019,7 +2082,7 @@ class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
def test_get_host_name_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
- host = fake._get_object(self.host_ref)
+ host = fake.get_object(self.host_ref)
ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
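The new FCD tests fix the exact vSphere calls the helpers must make.
Reconstructed from those assertions, vm_util.attach_fcd plausibly reduces
to the following sketch (inferred from the tests, not copied from vm_util):

    from oslo_vmware import vim_util as vutil

    def _create_fcd_id_obj(client_factory, fcd_id):
        # Wrap a first-class-disk id in the vSphere ns0:ID object.
        id_obj = client_factory.create('ns0:ID')
        id_obj.id = fcd_id
        return id_obj

    def attach_fcd(session, vm_ref, fcd_id, ds_ref_val, controller_key,
                   unit_number):
        disk_id = _create_fcd_id_obj(session.vim.client.factory, fcd_id)
        ds_ref = vutil.get_moref(ds_ref_val, 'Datastore')
        task = session._call_method(
            session.vim, "AttachDisk_Task", vm_ref, diskId=disk_id,
            datastore=ds_ref, controllerKey=controller_key,
            unitNumber=unit_number)
        session._wait_for_task(task)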
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
index f84c113758..19990b8b32 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vmops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -14,8 +14,8 @@
# under the License.
import time
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
@@ -37,9 +37,9 @@ from nova.tests.unit.virt.vmwareapi import stubs
from nova import version
from nova.virt import hardware
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -65,18 +65,20 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
self.flags(my_ip='',
flat_injected=True)
self._context = context.RequestContext('fake_user', 'fake_project')
- self._session = driver.VMwareAPISession()
+ self._session = session.VMwareAPISession()
self._virtapi = mock.Mock()
self._image_id = uuids.image
- fake_ds_ref = vmwareapi_fake.ManagedObjectReference(value='fake-ds')
+ fake_ds_ref = vmwareapi_fake.ManagedObjectReference(
+ name='Datastore', value='fake-ds')
self._ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=10 * units.Gi,
freespace=10 * units.Gi)
self._dc_info = ds_util.DcInfo(
ref='fake_dc_ref', name='fake_dc',
- vmFolder='fake_vm_folder')
+ vmFolder=vmwareapi_fake.ManagedObjectReference(
+ name='Folder', value='fake_vm_folder'))
cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
self._uuid = uuids.foo
fake_info_cache = {
@@ -166,7 +168,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.micro\n"
- "flavor:memory_mb:6\n"
+ "flavor:memory_mb:8\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
@@ -297,7 +299,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_save.assert_called_once_with()
self.assertEqual(50, self._instance.progress)
- @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+ @mock.patch.object(vm_util, 'get_vm_ref',
+ return_value=vmwareapi_fake.ManagedObjectReference())
def test_get_info(self, mock_get_vm_ref):
result = {
'summary.config.numCpu': 4,
@@ -577,7 +580,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
vmware_tools_status="toolsOk",
succeeds=False)
- def test_clean_shutdown_no_vwaretools(self):
+ def test_clean_shutdown_no_vmwaretools(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=1,
@@ -1138,6 +1141,14 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_attach_cdrom_to_vm.assert_called_once_with(
vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
+ def test_prepare_for_spawn_invalid_ram(self):
+ instance = self._instance.obj_clone()
+ flavor = objects.Flavor(vcpus=1, memory_mb=6, ephemeral_gb=1,
+ swap=1024, extra_specs={})
+ instance.flavor = flavor
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._vmops.prepare_for_spawn, instance)
+
@mock.patch('nova.image.glance.API.get')
@mock.patch.object(vmops.LOG, 'debug')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@@ -2051,7 +2062,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
extra_specs,
self._metadata)
- vm = vmwareapi_fake._get_object(vm_ref)
+ vm = vmwareapi_fake.get_object(vm_ref)
# Test basic VM parameters
self.assertEqual(self._instance.uuid, vm.name)
@@ -2074,7 +2085,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
datastores = vm.datastore.ManagedObjectReference
self.assertEqual(1, len(datastores))
- datastore = vmwareapi_fake._get_object(datastores[0])
+ datastore = vmwareapi_fake.get_object(datastores[0])
self.assertEqual(self._ds.name, datastore.get('summary.name'))
# Test that the VM's network is configured as specified
@@ -2176,7 +2187,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def _validate_flavor_extra_specs(self, flavor_extra_specs, expected):
# Validate that the extra specs are parsed correctly
flavor = objects.Flavor(name='my-flavor',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2227,7 +2238,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6}
flavor = objects.Flavor(name='my-flavor',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2280,7 +2291,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
'quota:cpu_reservation': 6,
'hw_video:ram_max_mb': 100}
flavor = objects.Flavor(name='my-flavor',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2692,7 +2703,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_storage_policy_none(self):
flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2706,7 +2717,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_storage_policy_extra_specs(self):
extra_specs = {'vmware:storage_policy': 'flavor-policy'}
flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2781,7 +2792,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_instance_metadata(self):
flavor = objects.Flavor(id=7,
name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2796,7 +2807,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
- "flavor:memory_mb:6\n"
+ "flavor:memory_mb:8\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
@@ -2913,7 +2924,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_cores_per_socket(self):
extra_specs = {'hw:cpu_sockets': 7}
flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
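Every flavor in this file moves from memory_mb=6 to memory_mb=8, and the
new test_prepare_for_spawn_invalid_ram expects InstanceUnacceptable for
6 MB. That is consistent with ESXi's requirement that VM memory be a
multiple of 4 MB; a hedged sketch of such a check (the helper name is
illustrative):

    from nova import exception

    def _validate_flavor_memory(instance):
        # Assumption: memory must be a positive multiple of 4 MB, which
        # makes 6 MB unacceptable while 8 MB passes.
        memory_mb = instance.flavor.memory_mb
        if memory_mb <= 0 or memory_mb % 4 != 0:
            reason = ("Memory size %d MB is not a multiple of 4" %
                      memory_mb)
            raise exception.InstanceUnacceptable(
                instance_id=instance.uuid, reason=reason)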
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
index 0a051d62f5..003cbb9283 100644
--- a/nova/tests/unit/virt/vmwareapi/test_volumeops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+import ddt
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
@@ -26,11 +28,12 @@ from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volumeops
+@ddt.ddt
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
@@ -38,7 +41,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self)
- self._session = driver.VMwareAPISession()
+ self._session = session.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
self._volumeops = volumeops.VMwareVolumeOps(self._session)
@@ -141,8 +144,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
self.assertTrue(get_vmdk_info.called)
get_vm_state.assert_called_once_with(self._volumeops._session,
instance)
@@ -265,8 +267,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
adapter_type = vm_util.CONTROLLER_TO_ADAPTER_TYPE.get(
@@ -315,8 +316,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
get_vm_state.assert_called_once_with(self._volumeops._session,
@@ -406,6 +406,57 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid)
self.assertFalse(detach_disk_from_vm.called)
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ @mock.patch.object(vm_util, 'get_vm_state')
+ @mock.patch.object(vm_util, 'detach_fcd')
+ def _test__detach_volume_fcd(
+ self, detach_fcd, get_vm_state, get_vm_ref,
+ adapter_type=constants.ADAPTER_TYPE_IDE, powered_off=True):
+ vm_ref = mock.sentinel.vm_ref
+ get_vm_ref.return_value = vm_ref
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ get_vm_state.return_value = (
+ power_state.SHUTDOWN if powered_off else power_state.RUNNING)
+
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ connection_info = {'data': {'id': fcd_id,
+ 'ds_ref_val': ds_ref_val,
+ 'adapter_type': adapter_type}}
+ instance = mock.sentinel.instance
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE and not powered_off:
+ self.assertRaises(exception.Invalid,
+ self._volumeops._detach_volume_fcd,
+ connection_info,
+ instance)
+ detach_fcd.assert_not_called()
+ else:
+ self._volumeops._detach_volume_fcd(connection_info, instance)
+ detach_fcd.assert_called_once_with(
+ self._volumeops._session, vm_ref, fcd_id)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_detach_volume_fcd_powered_off_instance(self, adapter_type):
+ self._test__detach_volume_fcd(adapter_type=adapter_type)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_detach_volume_fcd_powered_on_instance(self, adapter_type):
+ self._test__detach_volume_fcd(adapter_type=adapter_type,
+ powered_off=False)
+
+ @mock.patch.object(volumeops.VMwareVolumeOps, '_detach_volume_fcd')
+ def test_detach_volume_fcd(self, detach_volume_fcd):
+ connection_info = {'driver_volume_type': constants.DISK_FORMAT_FCD}
+ instance = mock.sentinel.instance
+ self._volumeops.detach_volume(connection_info, instance)
+ detach_volume_fcd.assert_called_once_with(connection_info, instance)
+
def _test_attach_volume_vmdk(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK,
'serial': 'volume-fake-id',
@@ -444,8 +495,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(
vm_ref, self._instance, adapter_type,
@@ -498,6 +548,126 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_vmdk(adapter_type)
+ @mock.patch.object(vm_util, 'allocate_controller_key_and_unit_number')
+ def test_get_controller_key_and_unit(
+ self, allocate_controller_key_and_unit_number):
+ key = mock.sentinel.key
+ unit = mock.sentinel.unit
+ allocate_controller_key_and_unit_number.return_value = (
+ key, unit, None)
+
+ with mock.patch.object(self._volumeops, '_session') as session:
+ devices = mock.sentinel.devices
+ session._call_method.return_value = devices
+
+ vm_ref = mock.sentinel.vm_ref
+ adapter_type = mock.sentinel.adapter_type
+ ret = self._volumeops._get_controller_key_and_unit(
+ vm_ref, adapter_type)
+ self.assertEqual((key, unit, None), ret)
+ session._call_method.assert_called_once_with(
+ vutil, 'get_object_property', vm_ref, 'config.hardware.device')
+ allocate_controller_key_and_unit_number.assert_called_once_with(
+ session.vim.client.factory, devices, adapter_type)
+
+ @mock.patch.object(volumeops.VMwareVolumeOps,
+ '_get_controller_key_and_unit')
+ @mock.patch.object(vm_util, 'reconfigure_vm')
+ @mock.patch.object(vm_util, 'attach_fcd')
+ def _test_attach_fcd(
+ self, attach_fcd, reconfigure_vm, get_controller_key_and_unit,
+ existing_controller=True):
+ key = mock.sentinel.key
+ unit = mock.sentinel.unit
+ spec = mock.sentinel.spec
+ if existing_controller:
+ get_controller_key_and_unit.return_value = (key, unit, None)
+ else:
+ get_controller_key_and_unit.side_effect = [(None, None, spec),
+ (key, unit, None)]
+
+ with mock.patch.object(self._volumeops, '_session') as session:
+ config_spec = mock.Mock()
+ session.vim.client.factory.create.return_value = config_spec
+
+ vm_ref = mock.sentinel.vm_ref
+ adapter_type = mock.sentinel.adapter_type
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ self._volumeops._attach_fcd(
+ vm_ref, adapter_type, fcd_id, ds_ref_val)
+
+ attach_fcd.assert_called_once_with(
+ session, vm_ref, fcd_id, ds_ref_val, key, unit)
+ if existing_controller:
+ get_controller_key_and_unit.assert_called_once_with(
+ vm_ref, adapter_type)
+ reconfigure_vm.assert_not_called()
+ else:
+ exp_calls = [mock.call(vm_ref, adapter_type),
+ mock.call(vm_ref, adapter_type)]
+ get_controller_key_and_unit.assert_has_calls(exp_calls)
+ self.assertEqual([spec], config_spec.deviceChange)
+ reconfigure_vm.assert_called_once_with(
+ session, vm_ref, config_spec)
+
+ def test_attach_fcd_using_existing_controller(self):
+ self._test_attach_fcd()
+
+ def test_attach_fcd_using_new_controller(self):
+ self._test_attach_fcd(existing_controller=False)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ @mock.patch.object(vm_util, 'get_vm_state')
+ @mock.patch.object(volumeops.VMwareVolumeOps, '_attach_fcd')
+ def _test__attach_volume_fcd(
+ self, attach_fcd, get_vm_state, get_vm_ref,
+ adapter_type=constants.ADAPTER_TYPE_IDE, powered_off=True):
+ vm_ref = mock.sentinel.vm_ref
+ get_vm_ref.return_value = vm_ref
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ get_vm_state.return_value = (
+ power_state.SHUTDOWN if powered_off else power_state.RUNNING)
+
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ connection_info = {'data': {'id': fcd_id,
+ 'ds_ref_val': ds_ref_val,
+ 'adapter_type': adapter_type}}
+ instance = mock.sentinel.instance
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE and not powered_off:
+ self.assertRaises(exception.Invalid,
+ self._volumeops._attach_volume_fcd,
+ connection_info,
+ instance)
+ attach_fcd.assert_not_called()
+ else:
+ self._volumeops._attach_volume_fcd(connection_info, instance)
+ attach_fcd.assert_called_once_with(
+ vm_ref, adapter_type, fcd_id, ds_ref_val)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_attach_volume_fcd_powered_off_instance(self, adapter_type):
+ self._test__attach_volume_fcd(adapter_type=adapter_type)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_attach_volume_fcd_powered_on_instance(self, adapter_type):
+ self._test__attach_volume_fcd(adapter_type=adapter_type,
+ powered_off=False)
+
+ @mock.patch.object(volumeops.VMwareVolumeOps, '_attach_volume_fcd')
+ def test_attach_volume_fcd(self, attach_volume_fcd):
+ connection_info = {'driver_volume_type': constants.DISK_FORMAT_FCD}
+ instance = mock.sentinel.instance
+ self._volumeops.attach_volume(connection_info, instance)
+ attach_volume_fcd.assert_called_once_with(connection_info, instance)
+
def test_attach_volume_iscsi(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
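The volumeops tests now use ddt to fan one helper out across adapter
types: @ddt.ddt on the class plus @ddt.data on a method generates one
named test per datum. A minimal illustration of the mechanism (the values
are placeholders):

    import ddt

    from nova import test

    @ddt.ddt
    class AdapterTypeTestCase(test.NoDBTestCase):

        @ddt.data('busLogic', 'ide', 'lsiLogicsas', 'paraVirtual')
        def test_adapter_type(self, adapter_type):
            # Runs four times, once per datum, each as its own test id.
            self.assertIsInstance(adapter_type, str)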
diff --git a/nova/tests/unit/virt/zvm/__init__.py b/nova/tests/unit/virt/zvm/__init__.py
index e69de29bb2..a93e19e1be 100644
--- a/nova/tests/unit/virt/zvm/__init__.py
+++ b/nova/tests/unit/virt/zvm/__init__.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+try:
+ import zvmconnector # noqa: F401
+except ImportError:
+ raise unittest.SkipTest(
+ "The 'zVMCloudConnector' dependency is not installed."
+ )
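Raising unittest.SkipTest while a test package's __init__ is being
imported makes the unittest loader report every module under that
package as skipped instead of failing discovery with an ImportError,
which is why the guard lives in __init__.py rather than in each
individual test module.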
diff --git a/nova/tests/unit/virt/zvm/test_driver.py b/nova/tests/unit/virt/zvm/test_driver.py
index 85a8a5227c..a5a129331d 100644
--- a/nova/tests/unit/virt/zvm/test_driver.py
+++ b/nova/tests/unit/virt/zvm/test_driver.py
@@ -13,8 +13,9 @@
# under the License.
import copy
-import mock
import os
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from nova.compute import provider_tree
diff --git a/nova/tests/unit/virt/zvm/test_guest.py b/nova/tests/unit/virt/zvm/test_guest.py
index 029f211ea4..c786270715 100644
--- a/nova/tests/unit/virt/zvm/test_guest.py
+++ b/nova/tests/unit/virt/zvm/test_guest.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.compute import power_state as compute_power_state
from nova import context
diff --git a/nova/tests/unit/virt/zvm/test_hypervisor.py b/nova/tests/unit/virt/zvm/test_hypervisor.py
index d2081d49e2..c816ca57f6 100644
--- a/nova/tests/unit/virt/zvm/test_hypervisor.py
+++ b/nova/tests/unit/virt/zvm/test_hypervisor.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/unit/virt/zvm/test_utils.py b/nova/tests/unit/virt/zvm/test_utils.py
index 60893759b9..77747855f4 100644
--- a/nova/tests/unit/virt/zvm/test_utils.py
+++ b/nova/tests/unit/virt/zvm/test_utils.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from zvmconnector import connector
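As in the rest of this series, the third-party mock package is swapped
for the stdlib module. The two expose the same API on the Python
versions nova supports, so only the import changes:

    from unittest import mock  # previously: import mock

    # Call sites such as mock.patch.object(...) are untouched.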
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index 6aa89cafd5..e53ebe3cb8 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -13,13 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import api_versions as cinder_api_versions
from cinderclient import exceptions as cinder_exception
from cinderclient.v3 import limits as cinder_limits
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import session
from keystoneclient import exceptions as keystone_exception
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -520,16 +521,15 @@ class CinderApiTestCase(test.NoDBTestCase):
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_failed(self, mock_cinderclient, mock_log):
mock_cinderclient.return_value.attachments.delete.side_effect = (
- cinder_exception.NotFound(404, '404'))
+ cinder_exception.BadRequest(400, '400'))
attachment_id = uuids.attachment
- ex = self.assertRaises(exception.VolumeAttachmentNotFound,
+ ex = self.assertRaises(exception.InvalidInput,
self.api.attachment_delete,
self.ctx,
attachment_id)
- self.assertEqual(404, ex.code)
- self.assertIn(attachment_id, str(ex))
+ self.assertEqual(400, ex.code)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
@@ -546,6 +546,16 @@ class CinderApiTestCase(test.NoDBTestCase):
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_not_found(self, mock_cinderclient):
+ mock_cinderclient.return_value.attachments.delete.side_effect = (
+ cinder_exception.ClientException(404))
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(1, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_internal_server_error(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.ClientException(500))
@@ -569,6 +579,29 @@ class CinderApiTestCase(test.NoDBTestCase):
self.assertEqual(2, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_gateway_timeout(self, mock_cinderclient):
+ mock_cinderclient.return_value.attachments.delete.side_effect = (
+ cinder_exception.ClientException(504))
+
+ self.assertRaises(cinder_exception.ClientException,
+ self.api.attachment_delete,
+ self.ctx, uuids.attachment_id)
+
+ self.assertEqual(5, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_gateway_timeout_do_not_raise(
+ self, mock_cinderclient):
+ # raise an exception on the first call, then return normally on the retry
+ mock_cinderclient.return_value.attachments.delete.side_effect = [
+ cinder_exception.ClientException(504), None]
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(2, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_bad_request_exception(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.BadRequest(400))
@@ -1046,6 +1079,17 @@ class CinderApiTestCase(test.NoDBTestCase):
mock_volumes.get_encryption_metadata.assert_called_once_with(
{'encryption_key_id': 'fake_key'})
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_volume_reimage(self, mock_cinderclient):
+ mock_reimage = mock.MagicMock()
+ mock_volumes = mock.MagicMock(reimage=mock_reimage)
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+ self.api.reimage_volume(
+ self.ctx, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_cinderclient.assert_called_once_with(self.ctx, '3.68')
+ mock_reimage.assert_called_with(uuids.volume_id, uuids.image_id, True)
+
def test_translate_cinder_exception_no_error(self):
my_func = mock.Mock()
my_func.__name__ = 'my_func'
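The new gateway-timeout tests pin down a retry policy for
attachment_delete: up to five attempts while Cinder keeps returning 504,
success as soon as a retry passes (two calls in the second test), and
propagation of the final error once the attempts are exhausted. One way
to express that policy with tenacity (a sketch, not nova's
implementation):

    from cinderclient import exceptions as cinder_exception
    import tenacity

    def _is_gateway_timeout(exc):
        return (isinstance(exc, cinder_exception.ClientException) and
                exc.code == 504)

    @tenacity.retry(
        retry=tenacity.retry_if_exception(_is_gateway_timeout),
        stop=tenacity.stop_after_attempt(5),
        reraise=True)
    def delete_attachment(client, attachment_id):
        client.attachments.delete(attachment_id)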